Example #1
    def __init__(self, datacfg, cfgfile, weightfile):
        data_options = read_data_cfg(datacfg)
        net_options = parse_cfg(cfgfile)[0]

        trainlist = data_options['train']
        testlist = data_options['valid']
        backupdir = data_options['backup']
        nsamples = file_lines(trainlist)
        gpus = data_options['gpus']  # e.g. 0,1,2,3
        ngpus = len(gpus.split(','))
        num_workers = int(data_options['num_workers'])

        batch_size = int(net_options['batch'])
        max_batches = int(net_options['max_batches'])
        learning_rate = float(net_options['learning_rate'])
        momentum = float(net_options['momentum'])
        decay = float(net_options['decay'])
        steps = [float(step) for step in net_options['steps'].split(',')]
        scales = [float(scale) for scale in net_options['scales'].split(',')]

        #Train parameters
        max_epochs = max_batches * batch_size // nsamples + 1
        use_cuda = False  # True
        seed = 231  #int(time.time())
        eps = 1e-5
        save_interval = 10  # epochs
        dot_interval = 70  # batches

        # Test parameters
        conf_thresh = 0.25
        nms_thresh = 0.4
        iou_thresh = 0.5
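The fields above assume read_data_cfg returns a flat key/value dict parsed from a Darknet-style .data file (keys such as train, valid, backup, gpus) and that file_lines counts the entries in the training list. A minimal sketch of such helpers, as an assumption rather than this project's exact code:

def read_data_cfg(path):
    # Parse "key = value" lines of a Darknet-style .data file into a dict.
    options = {}
    with open(path) as fp:
        for line in fp:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, _, value = line.partition('=')
            options[key.strip()] = value.strip()
    return options

def file_lines(path):
    # Count non-empty lines (one sample path per line) in a list file.
    with open(path) as fp:
        return sum(1 for line in fp if line.strip())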
Example #2
def get_config():
    opt = parse_opts()  # Training settings
    dataset_use = opt.dataset  # which dataset to use
    datacfg = opt.data_cfg  # path for dataset of training and validation, e.g: cfg/ucf24.data
    cfgfile = opt.cfg_file  # path for cfg file, e.g: cfg/ucf24.cfg
    # assert dataset_use == 'ucf101-24' or dataset_use == 'jhmdb-21', 'invalid dataset'

    # loss parameters
    loss_options = parse_cfg(cfgfile)[1]
    region_loss = RegionLoss()
    anchors = loss_options['anchors'].split(',')
    region_loss.anchors = [float(i) for i in anchors]
    region_loss.num_classes = int(loss_options['classes'])
    region_loss.num_anchors = int(loss_options['num'])

    return opt, region_loss
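For reference, the loss block feeds RegionLoss a flat comma-separated anchor list; with num anchors, each anchor spans len(anchors)/num values (typically 2, a width/height pair). A small self-contained illustration with made-up values (the real numbers come from the .cfg file):

loss_options = {'anchors': '0.70,0.99, 1.85,2.62, 3.88,5.41', 'classes': '24', 'num': '3'}
anchors = [float(a) for a in loss_options['anchors'].split(',')]
num_anchors = int(loss_options['num'])
anchor_step = len(anchors) // num_anchors   # 2 -> (w, h) pairs
assert len(anchors) == num_anchors * anchor_step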
Example #3
    def __init__(self, cfgfile):
        super(Darknet, self).__init__()
        self.blocks = cfg.parse_cfg(cfgfile)
        self.models = self.create_network(self.blocks)  # merge conv, bn,leaky
        self.loss = self.models[len(self.models) - 1]

        self.width = int(self.blocks[0]['width'])
        self.height = int(self.blocks[0]['height'])

        if self.blocks[(len(self.blocks) - 1)]['type'] == 'region':
            self.anchors = self.loss.anchors
            self.num_anchors = self.loss.num_anchors
            self.anchor_step = self.loss.anchor_step
            self.num_classes = self.loss.num_classes

        self.header = torch.IntTensor([0, 0, 0, 0])
        self.seen = 0
Example #4
def set_cfg():
    c = config.parameter()

    datacfg = mFlags.data
    c.cfgfile = mFlags.config
    c.weightfile = mFlags.weights
    no_eval = mFlags.no_eval

    data_options = read_data_cfg(datacfg)
    net_options = parse_cfg(c.cfgfile)[0]

    c.use_cuda = torch.cuda.is_available() and (True if c.use_cuda is None else
                                                c.use_cuda)
    c.trainlist = data_options['train']
    c.testlist = data_options['valid']
    c.backupdir = data_options['backup']
    c.gpus = data_options['gpus']
    c.ngpus = len(c.gpus.split(','))
    c.num_workers = int(data_options['num_workers'])

    c.batch_size = int(net_options['batch'])
    c.max_batches = 10 * int(net_options['max_batches'])
    c.learning_rate = float(net_options['learning_rate'])
    c.momentum = float(net_options['momentum'])
    c.decay = float(net_options['decay'])
    c.steps = [float(step) for step in net_options['steps'].split(',')]
    c.scales = [float(scale) for scale in net_options['scales'].split(',')]

    try:
        c.max_epochs = int(net_options['max_epochs'])
    except KeyError:
        nsamples = file_lines(c.trainlist)
        c.max_epochs = (c.max_batches * c.batch_size) // nsamples + 1
    seed = int(time.time())
    torch.manual_seed(seed)
    if c.use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = c.gpus
        torch.cuda.manual_seed(seed)

    c.device = torch.device("cuda" if c.use_cuda else "cpu")
    print('set_cfg')

    return c
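The max_epochs fallback above converts a batch budget into epochs: max_batches batches of batch_size images each, divided by the dataset size. A worked example with purely illustrative numbers (not taken from any particular .cfg):

max_batches, batch_size, nsamples = 80200, 64, 117264    # hypothetical values
max_epochs = (max_batches * batch_size) // nsamples + 1  # 5132800 // 117264 + 1 == 44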
Example #5
        def __init__(self, cfgfile, use_cuda=False):
            super(DarkNet, self).__init__()
            self.use_cuda = use_cuda
            self.blocks = parse_cfg(cfgfile)
            self.models = self.create_network(self.blocks) # merge conv, bn,leaky
            self.loss_layers = self.getLossLayers()

            #self.width = int(self.blocks[0]['width'])
            #self.height = int(self.blocks[0]['height'])
    
            if len(self.loss_layers) > 0:
                last = len(self.loss_layers)-1
                self.anchors = self.loss_layers[last].anchors
                self.num_anchors = self.loss_layers[last].num_anchors
                self.anchor_step = self.loss_layers[last].anchor_step
                self.num_classes = self.loss_layers[last].num_classes
    
            # default format : major=0, minor=1
            self.header = torch.IntTensor([0,1,0,0])
            self.seen = 0
Example #6
def valid(datacfg, cfgfile, weightfile):
    def truths_length(truths):
        for i in range(50):
            if truths[i][1] == 0:
                return i

    # Parse data configuration files
    data_options = read_data_cfg(datacfg)
    valid_images = data_options['valid']
    meshname = data_options['mesh']
    name = data_options['name']
    im_width = int(data_options['im_width'])
    im_height = int(data_options['im_height'])
    fx = float(data_options['fx'])
    fy = float(data_options['fy'])
    u0 = float(data_options['u0'])
    v0 = float(data_options['v0'])

    # Parse net configuration file
    net_options = parse_cfg(cfgfile)[0]
    loss_options = parse_cfg(cfgfile)[-1]
    conf_thresh = float(net_options['conf_thresh'])
    num_keypoints = int(net_options['num_keypoints'])
    num_classes = int(loss_options['classes'])
    num_anchors = int(loss_options['num'])
    anchors = [float(anchor) for anchor in loss_options['anchors'].split(',')]

    # Read object model information, get 3D bounding box corners, get intrinsics
    mesh = MeshPly(meshname)
    vertices = np.c_[np.array(mesh.vertices),
                     np.ones((len(mesh.vertices), 1))].transpose()
    corners3D = get_3D_corners(vertices)
    diam = float(data_options['diam'])
    intrinsic_calibration = get_camera_intrinsic(u0, v0, fx,
                                                 fy)  # camera params

    # Network I/O params
    num_labels = 2 * num_keypoints + 3  # +2 for width, height, +1 for object class
    errs_2d = []  # to save 2D projection errors
    with open(valid_images) as fp:  # validation file names
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    # Compute-related Parameters
    use_cuda = True  # whether to use cuda or not
    kwargs = {'num_workers': 4, 'pin_memory': True}  # number of workers etc.

    # Specify the model, load pretrained weights, move it to the GPU and set the module to evaluation mode
    model = Darknet(cfgfile)
    model.load_weights(weightfile)
    model.cuda()
    model.eval()

    # Get the dataloader for the test dataset
    valid_dataset = dataset_multi.listDataset(valid_images,
                                              shape=(model.width,
                                                     model.height),
                                              shuffle=False,
                                              objclass=name,
                                              transform=transforms.Compose([
                                                  transforms.ToTensor(),
                                              ]))
    test_loader = torch.utils.data.DataLoader(valid_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              **kwargs)

    # Iterate through test batches (Batch size for test data is 1)
    logging('Testing {}...'.format(name))
    for batch_idx, (data, target) in enumerate(test_loader):

        t1 = time.time()
        # Pass data to GPU
        if use_cuda:
            data = data.cuda()
            # target = target.cuda()

        # Wrap tensors in Variable with volatile=True to run in inference mode and minimize memory use
        data = Variable(data, volatile=True)
        t2 = time.time()

        # Forward pass
        output = model(data).data
        t3 = time.time()

        # Using confidence threshold, eliminate low-confidence predictions
        trgt = target[0].view(-1, num_labels)
        all_boxes = get_multi_region_boxes(output,
                                           conf_thresh,
                                           num_classes,
                                           num_keypoints,
                                           anchors,
                                           num_anchors,
                                           int(trgt[0][0]),
                                           only_objectness=0)
        t4 = time.time()

        # Iterate through all images in the batch
        for i in range(output.size(0)):

            # For each image, get all the predictions
            boxes = all_boxes[i]

            # For each image, get all the targets (for multiple object pose estimation, there might be more than 1 target per image)
            truths = target[i].view(-1, num_labels)

            # Get how many objects are present in the scene
            num_gts = truths_length(truths)

            # Iterate through each ground-truth object
            for k in range(num_gts):
                box_gt = list()
                for j in range(1, num_labels):
                    box_gt.append(truths[k][j])
                box_gt.extend([1.0, 1.0])
                box_gt.append(truths[k][0])

                # If the prediction has the highest confidence, choose it as our prediction
                best_conf_est = -sys.maxsize
                for j in range(len(boxes)):
                    if (boxes[j][2 * num_keypoints] >
                            best_conf_est) and (boxes[j][2 * num_keypoints + 2]
                                                == int(truths[k][0])):
                        best_conf_est = boxes[j][2 * num_keypoints]
                        box_pr = boxes[j]
                        match = corner_confidence(
                            box_gt[:2 * num_keypoints],
                            torch.FloatTensor(boxes[j][:2 * num_keypoints]))

                # Denormalize the corner predictions
                corners2D_gt = np.array(np.reshape(box_gt[:2 * num_keypoints],
                                                   [-1, 2]),
                                        dtype='float32')
                corners2D_pr = np.array(np.reshape(box_pr[:2 * num_keypoints],
                                                   [-1, 2]),
                                        dtype='float32')
                corners2D_gt[:, 0] = corners2D_gt[:, 0] * im_width
                corners2D_gt[:, 1] = corners2D_gt[:, 1] * im_height
                corners2D_pr[:, 0] = corners2D_pr[:, 0] * im_width
                corners2D_pr[:, 1] = corners2D_pr[:, 1] * im_height
                corners2D_gt_corrected = fix_corner_order(
                    corners2D_gt)  # Fix the order of corners

                # Compute [R|t] by pnp
                objpoints3D = np.array(np.transpose(
                    np.concatenate((np.zeros((3, 1)), corners3D[:3, :]),
                                   axis=1)),
                                       dtype='float32')
                K = np.array(intrinsic_calibration, dtype='float32')
                R_gt, t_gt = pnp(objpoints3D, corners2D_gt_corrected, K)
                R_pr, t_pr = pnp(objpoints3D, corners2D_pr, K)

                # Compute pixel error
                Rt_gt = np.concatenate((R_gt, t_gt), axis=1)
                Rt_pr = np.concatenate((R_pr, t_pr), axis=1)
                proj_2d_gt = compute_projection(vertices, Rt_gt,
                                                intrinsic_calibration)
                proj_2d_pred = compute_projection(vertices, Rt_pr,
                                                  intrinsic_calibration)
                proj_corners_gt = np.transpose(
                    compute_projection(corners3D, Rt_gt,
                                       intrinsic_calibration))
                proj_corners_pr = np.transpose(
                    compute_projection(corners3D, Rt_pr,
                                       intrinsic_calibration))
                norm = np.linalg.norm(proj_2d_gt - proj_2d_pred, axis=0)
                pixel_dist = np.mean(norm)
                errs_2d.append(pixel_dist)

        t5 = time.time()

    # Compute 2D projection score
    eps = 1e-5
    for px_threshold in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:
        acc = len(np.where(np.array(errs_2d) <= px_threshold)[0]) * 100. / (
            len(errs_2d) + eps)
        # Print test statistics
        logging('   Acc using {} px 2D Projection = {:.2f}%'.format(
            px_threshold, acc))
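The pixel-error computation above relies on get_camera_intrinsic(u0, v0, fx, fy) producing a 3x3 pinhole intrinsic matrix from the .data calibration values. A hedged sketch of what that matrix presumably looks like (the project's helper may differ in detail):

import numpy as np

def camera_intrinsic(u0, v0, fx, fy):
    # Standard pinhole intrinsics: focal lengths on the diagonal,
    # principal point (u0, v0) in the last column.
    return np.array([[fx, 0.0, u0],
                     [0.0, fy, v0],
                     [0.0, 0.0, 1.0]], dtype='float32')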
Example #7
def main():
    datacfg = FLAGS.data
    cfgfile = FLAGS.config
    weightfile = FLAGS.weights
    # eval    = FLAGS.eval
    continuetrain = FLAGS.continuetrain
    adaptation = FLAGS.adaptation
    layerwise = FLAGS.layerwise
    max_epochs = FLAGS.epoch
    # condition = FLAGS.condition

    data_options = read_data_cfg(datacfg)
    net_options = parse_cfg(cfgfile)[0]

    global use_cuda
    use_cuda = torch.cuda.is_available() and (True if use_cuda is None else
                                              use_cuda)
    globals()["trainlist"] = data_options['train']
    globals()["testlist"] = data_options['valid']
    globals()["classname"] = data_options['names']
    globals()["backupdir"] = data_options['backup']
    globals()["gpus"] = data_options['gpus']  # e.g. 0,1,2,3
    globals()["ngpus"] = len(gpus.split(','))
    globals()["num_workers"] = int(data_options['num_workers'])
    globals()["batch_size"] = int(net_options['batch'])
    globals()["max_batches"] = int(net_options['max_batches'])
    globals()["burn_in"] = int(net_options['burn_in'])
    # globals()["learning_rate"] = float(net_options['learning_rate'])
    globals()["momentum"] = float(net_options['momentum'])
    globals()["decay"] = float(net_options['decay'])
    globals()["steps"] = [
        int(step) for step in net_options['steps'].split(',')
    ]
    globals()["scales"] = [
        float(scale) for scale in net_options['scales'].split(',')
    ]

    learning_rate = float(net_options['learning_rate'])
    try:
        globals()["backupdir"] = data_options['backup']
    except:
        globals()["backupdir"] = 'backup'

    if not os.path.exists(backupdir):
        os.mkdir(backupdir)

    try:
        globals()["logfile"] = data_options['logfile']
    except:
        globals()["logfile"] = 'backup/logfile.txt'

    try:
        globals()["condition"] = bool(net_options['condition'])
    except:
        globals()["condition"] = False

    seed = int(time.time())
    torch.manual_seed(seed)
    if use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        torch.cuda.manual_seed(seed)

    global device
    device = torch.device("cuda" if use_cuda else "cpu")

    global model
    model = Darknet(cfgfile, use_cuda=use_cuda)

    model.print_network()
    nsamples = file_lines(trainlist)
    #initialize the model
    if FLAGS.reset:
        model.seen = 0
        init_epoch = 0
    else:
        init_epoch = model.seen // nsamples
    iterates = 0
    savelog('# Defaults configurations:')
    savelog('# Trainlist %s, Testlist %s' % (trainlist, testlist))
    savelog('# Cfg file: %s' % (cfgfile))
    savelog('# Max_epoch: %d, batchsize: %d, burn_in %d , Learning rate: %e' %
            (max_epochs, batch_size, burn_in, learning_rate))
    savelog('# Image size (width and height): %d x %d' %
            (model.width, model.height))
    savelog('# Step: %s' % (steps))
    if condition:
        savelog('# Training with conditioning_net = %d ' % (condition))
    if adaptation > 0:
        savelog('# Training Adaptation the first %d layers' % (adaptation))
    if layerwise > 0:
        savelog('# Training Layerwise every %d layers ' % (layerwise))

    if weightfile is not None:
        model.load_weights(weightfile)
        savelog('# Load weight file from %s' % (weightfile))
        continuetrain = None

    if continuetrain is not None:
        checkpoint = torch.load(continuetrain)
        model.load_state_dict(checkpoint['state_dict'])
        try:
            init_epoch = int(continuetrain.split('.')[0][-2:])
        except:
            logging(
                'Warning!!! The continuetrain filename must end with at least 2 digits indicating the last epoch'
            )
        iterates = init_epoch * (nsamples / batch_size)
        savelog('# Training continue from model %s' % continuetrain)
        savelog('# Training starting from %d epoch with %d iterates ' %
                (init_epoch, iterates))

    global loss_layers
    loss_layers = model.loss_layers
    for l in loss_layers:
        l.seen = model.seen

    if use_cuda:
        if ngpus > 1:
            model = torch.nn.DataParallel(model).to(device)
            logging('Use CUDA train on %s GPUs' % (gpus))
        else:
            model = model.to(device)
            logging('Use CUDA train only 1 GPU')

    params_dict = dict(model.named_parameters())
    params = []
    for key, value in params_dict.items():
        if key.find('.bn') >= 0 or key.find('.bias') >= 0:
            params += [{'params': [value], 'weight_decay': 0.0}]
        else:
            params += [{'params': [value], 'weight_decay': decay * batch_size}]
    global optimizer
    optimizer = optim.SGD(model.parameters(),
                          lr=learning_rate / batch_size,
                          momentum=momentum,
                          dampening=0,
                          weight_decay=decay * batch_size)
    savelog(
        '# Optimizer: SGD with learning rate: %f momentum %f weight_decay %f' %
        (learning_rate / batch_size, momentum, decay * batch_size))
    # optimizer = optim.Adam(model.parameters(),lr=learning_rate)

    if continuetrain is not None:
        checkpoint = torch.load(continuetrain)
        optimizer.load_state_dict(checkpoint['optimizer'])

    if adaptation > 0:
        savelog('# Training Segment Adaptation: ')
        freeze_weight_adaptation(adaptation)

    # global train_dataset, valid_dataset
    global train_dataset, valid_dataset

    cur_loss, best_loss = 0.0, 10000.0
    best_pre, cur_pre, cur_rec, best_rec = 0.0, 0.0, 0.0, 0.0
    lr_time, loss_time, rec_time = 0, 0, 0

    try:

        savelog("# Training for ({:d},{:d})".format(init_epoch + 1,
                                                    max_epochs))
        for epoch in range(init_epoch + 1, max_epochs + 1):
            ### Split trainsampler and validsampler from the trainset.
            train_sampler, valid_sampler = get_train_valid_sampler()
            if condition:
                iterates, cur_loss = train_conditioning(
                    epoch, iterates, train_sampler)
            else:
                ### This is for layerwise and normal training.
                if layerwise > 0:
                    layerwise = update_weight_layerwise(epoch, layerwise)

                iterates, cur_loss = train(epoch, iterates, train_sampler,
                                           adaptation, layerwise)

            ### validate
            if cur_loss < 100:
                cur_fscore, cur_pre, cur_rec = PR_Valid(epoch, valid_sampler)

            savelog(
                "%d trainloss: %f fscore: %.4f precision: %.4f recall: %.4f" %
                (epoch, cur_loss, cur_fscore, cur_pre, cur_rec))
            savemodel(epoch, cfgfile, cur_pre, True)

            ### This is an important procedure we introduce to monitor training and reduce waiting time.
            ### The idea: if the network does not learn (loss increases) and validation precision does not improve, decrease the lr.
            ### Changing lr by precision (see the fast training procedure).

            if ((adaptation > 0) or (layerwise > 0)):
                adaptation, layerwise, best_loss, loss_time, best_rec = check_update_require_grad(
                    adaptation, layerwise, cur_loss, best_loss, loss_time,
                    cur_rec, best_rec)
            else:
                best_loss, loss_time, best_rec, rec_time, learning_rate, lr_time = check_change_lr(
                    cur_loss, best_loss, loss_time, cur_rec, best_rec,
                    rec_time, learning_rate, lr_time)
                # learning_rate = change_lr_by_epoch(epoch, learning_rate)
            logging('-' * 90)

    except KeyboardInterrupt:
        logging('=' * 80)
        logging('Exiting from training by interrupt')
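The steps/scales read above usually drive a Darknet-style step learning-rate schedule. A hedged sketch of such a schedule, consistent with the burn_in and lr/batch_size conventions in this snippet but not necessarily identical to this repo's adjust_learning_rate:

def adjust_learning_rate(optimizer, processed_batches, learning_rate, steps, scales,
                         batch_size, burn_in=0):
    # Warm up for the first burn_in batches, then multiply lr by each scale
    # once the corresponding step (in batches) has been passed.
    lr = learning_rate
    if burn_in and processed_batches < burn_in:
        lr = learning_rate * (processed_batches / burn_in) ** 4
    else:
        for step, scale in zip(steps, scales):
            if processed_batches >= step:
                lr = lr * scale
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr / batch_size  # same lr/batch_size convention as above
    return lr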
Example #8
    return img


if __name__ == '__main__':
    #datacfg = 'cfg/ape.data'
    modelcfg = 'multi_obj_pose_estimation/cfg/yolo-pose-multi.cfg'
    weightfile = '../Assets/trained/multi.weights'

    # Initialize the model
    model = Darknet(modelcfg)
    model.load_weights(weightfile)
    model = model.cuda()
    model.eval()

    # For loading the model (parse network and loss options)
    net_options = parse_cfg(modelcfg)[0]
    loss_options = parse_cfg(modelcfg)[-1]

    conf_thresh = float(net_options['conf_thresh'])
    num_keypoints = int(net_options['num_keypoints'])
    num_classes = int(loss_options['classes'])
    num_anchors = int(loss_options['num'])
    anchors = [float(anchor) for anchor in loss_options['anchors'].split(',')]
    test_width = 416
    test_height = 416

    datasetPath = '../Assets/DataSets/LINEMOD/'
    datasetImagePaths = datasetPath + 'benchvise/JPEGImages/'
    outputPath = '../Assets/Outputs/Multi/'

    labelFolders = list()
Example #9
                        cls_conf = box[5 + 2 * j]
                        cls_id = box[6 + 2 * j]
                        prob = det_conf * cls_conf
                        fps[i].write('%s %f %f %f %f %f\n' %
                                     (imgid, prob, x1, y1, x2, y2))

    for i in range(n_cls):
        fps[i].close()

    # import pdb; pdb.set_trace()

if __name__ == '__main__':
    import sys
    if len(sys.argv) in [5, 6, 7, 8]:
        datacfg = sys.argv[1]
        darknet = parse_cfg(sys.argv[2])
        learnet = parse_cfg(sys.argv[3])
        weightfile = sys.argv[4]
        traindict = sys.argv[5]
        if len(sys.argv) >= 7:
            gpu = sys.argv[6]
        else:
            gpu = '0'
        if len(sys.argv) == 8:
            use_baserw = True
        else:
            use_baserw = False

        data_options = read_data_cfg(datacfg)
        net_options = darknet[0]
        meta_options = learnet[0]
Example #10
                        # fps[cls_id].write('%s %f %f %f %f %f %f\n' % (fileId, det_conf, cls_conf, x1, y1, x2, y2))

    for i in range(m.num_classes):
        fps[i].close()


if __name__ == '__main__':
    import sys
    if len(sys.argv) == 4 or len(sys.argv) == 5:
        datacfg = sys.argv[1]
        cfgfile = sys.argv[2]
        weightfile = sys.argv[3]
        if len(sys.argv) == 5:
            gpu = sys.argv[4]
        else:
            gpu = '0'

        data_options = read_data_cfg(datacfg)
        net_options = parse_cfg(cfgfile)[0]
        data_options['gpus'] = gpu
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu
        # Configure options
        cfg.config_data(data_options)
        cfg.config_net(net_options)

        outfile = 'comp4_det_test_'
        valid(datacfg, cfgfile, weightfile, outfile)
    else:
        print('Usage:')
        print(' python valid.py datacfg cfgfile weightfile')
Example #11
def main():
    datacfg = "cfg/c.data"  #FLAGS.data
    cfgfile = "cfg/model_structure.cfg"  #FLAGS.config
    loc_cfg = "cfg/setting.config"
    main_cfg = "../../Training.config"

    localmax = False  # FLAGS.localmax
    no_eval = False  #FLAGS.no_eval
    init_eval = False  #FLAGS.init_eval

    data_options = read_data_cfg(datacfg)
    main_options = read_data_cfg(main_cfg)
    loc_options = read_data_cfg(loc_cfg)
    net_options = parse_cfg(cfgfile)[0]

    global use_cuda
    use_cuda = torch.cuda.is_available() and (True if use_cuda is None else
                                              use_cuda)
    repo = reportObj
    globals()["trainlist"] = main_options['train']  #data_options['train']
    globals()["testlist"] = main_options['valid']  #data_options['valid']
    globals()["names"] = main_options['names']
    print(main_options['names'])
    modify_nn_file(main_options['names'])

    globals()["image_folder"] = main_options['image_folder']
    globals()["log_folder"] = repo["log_folder"] = main_options['log_folder']
    globals()["report_folder"] = repo["report_folder"] = main_options['report_folder']

    globals()["backupdir"] = data_options['backup']
    globals()["gpus"] = data_options['gpus']  # e.g. 0,1,2,3
    globals()["ngpus"] = len(gpus.split(','))
    globals()["num_workers"] = int(data_options['num_workers'])

    globals()["batch_size"] = repo["batch_size"] = int(main_options['batch'])
    globals()["max_batches"] = int(main_options['max_batches'])
    globals()["learning_rate"] = repo["learning_rate"] = float(
        main_options['learning_rate'])
    globals()["momentum"] = float(net_options['momentum'])
    globals()["decay"] = float(net_options['decay'])
    globals()["steps"] = [
        float(step) for step in net_options['steps'].split(',')
    ]
    globals()["scales"] = [
        float(scale) for scale in net_options['scales'].split(',')
    ]

    training_proc_reset = eval(loc_options['training_process_init'])
    setwdata = eval(loc_options['setwdata'])
    weightfiles = [
        int(os.path.split(f)[1].replace('.weights', '')) for f in glob.glob(
            os.path.join(loc_options['weightfolder'], '*.weights'))
    ]
    #latest_weightfile
    weightfile = loc_options[
        'weightfile'] if training_proc_reset else os.path.join(
            loc_options['weightfolder'],
            str(max(weightfiles)).rjust(6, '0') + ".weights")  # FLAGS.weights
    globals()["wdata"] = loc_options['wdata'] if setwdata else None
    globals()["odw"] = int(
        loc_options["origd_weighting"]) if setwdata else None
    repo["model_name"] = "YOLOv3"
    repo["version"] = "v4.1"
    print(trainlist)
    print(testlist)
    print(wdata)
    print(weightfile)
    #print(p)
    global max_epochs
    try:
        max_epochs = int(main_options['max_epochs'])
    except KeyError:
        nsamples = file_lines(trainlist)
        max_epochs = (max_batches * batch_size) // nsamples + 1

    seed = int(time.time())
    torch.manual_seed(seed)
    if use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        torch.cuda.manual_seed(seed)
    global device
    device = torch.device("cuda" if use_cuda else "cpu")

    global model
    model = Darknet(cfgfile, use_cuda=use_cuda)
    if weightfile is not None:
        model.load_weights(weightfile)
    else:
        model.load_weights('weights/pretrained_weight/yolov3.weights')

    #model.print_network()
    model_summary(model)
    #print(p)
    nsamples = repo["training_size"] = file_lines(trainlist)
    repo["validation_size"] = file_lines(testlist)
    #initialize the model
    if training_proc_reset:
        model.seen = 0
        init_epoch = 0
    else:
        init_epoch = model.seen // nsamples

    global loss_layers
    loss_layers = model.loss_layers
    for l in loss_layers:
        l.seen = model.seen

    globals()["test_loader"] = load_testlist(testlist)
    if use_cuda:
        if ngpus > 1:
            model = torch.nn.DataParallel(model).to(device)
        else:
            model = model.to(device)

    params_dict = dict(model.named_parameters())
    params = []
    for key, value in params_dict.items():
        if key.find('.bn') >= 0 or key.find('.bias') >= 0:
            params += [{'params': [value], 'weight_decay': 0.0}]
        else:
            params += [{'params': [value], 'weight_decay': decay * batch_size}]
    global optimizer
    #optimizer = optim.SGD(model.parameters(), lr=learning_rate/batch_size, momentum=momentum,  dampening=0, weight_decay=decay*batch_size)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    if evaluate:
        logging('evaluating ...')
        test(0)
    else:
        try:
            print("Training for ({:d},{:d})".format(init_epoch + 1,
                                                    max_epochs))
            print("Making Starting Report...")
            reporting(repo)

            fscore = 0
            correct = 0
            if init_eval and not no_eval and init_epoch > test_interval:
                print('>> initial evaluating ...')
                mcorrect, mfscore = test(init_epoch)
                print('>> done evaluation.')
            else:
                mfscore = 0.5
                mcorrect = 0
            t1 = time.time()
            for cnt, epoch in enumerate(range(init_epoch + 1, max_epochs + 1)):
                print(f"epoch number: {epoch}")
                nsamples = train(epoch)
                if epoch % save_interval == 0:
                    savemodel(epoch, nsamples)
                if not no_eval and epoch >= test_interval and (
                        epoch % test_interval) == 0:
                    print('>> interim evaluating ...')
                    correct, fscore = test(epoch)
                    print('>> done evaluation.')
                if localmax and correct > mcorrect:
                    mfscore = fscore
                    mcorrect = correct
                    savemodel(epoch, nsamples, True)
                t2 = time.time()
                reportObj['avg_epoch_time'] = str(
                    round((t2 - t1) / (cnt + 1), 2)).ljust(9, ' ')
                print('-' * 90)

        except KeyboardInterrupt:
            print('=' * 80)
            print('Exiting from training by interrupt')

        finally:
            print("Making End Report...")
            reporting(repo, True)
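The weight-file lookup above assumes checkpoints are named with zero-padded six-digit integers (e.g. 000040.weights). A quick round-trip check of that naming convention with a hypothetical value:

import os

epoch = 40                                     # hypothetical checkpoint number
fname = str(epoch).rjust(6, '0') + '.weights'  # -> '000040.weights'
assert int(os.path.split(fname)[1].replace('.weights', '')) == epoch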
Example #12
 def __init__(self, cfgfile, train=True):
     super(Darknet, self).__init__()
     self.blocks = parse_cfg(cfgfile)
     self.net_info, self.module_list = create_modules(self.blocks)
     self.training = train
def run():

    logger = logging.getLogger()

    # Parse command window input
    parser = argparse.ArgumentParser(description='SingleShotPose')
    parser.add_argument('--datacfg', type=str,
                        default='cfg/ape.data')  # data config
    parser.add_argument('--modelcfg', type=str,
                        default='cfg/yolo-pose.cfg')  # network config
    parser.add_argument(
        '--initweightfile', type=str,
        default='backup/init.weights')  # initialization weights
    parser.add_argument('--pretrain_num_epochs', type=int,
                        default=0)  # how many epoch to pretrain
    args = parser.parse_args()
    datacfg = args.datacfg
    modelcfg = args.modelcfg
    initweightfile = args.initweightfile
    pretrain_num_epochs = args.pretrain_num_epochs

    print("ARGS: ", args)

    # Parse data configuration file
    data_options = read_data_cfg(datacfg)
    trainlist = data_options['valid']
    gpus = data_options['gpus']
    num_workers = int(data_options['num_workers'])
    backupdir = data_options['backup']
    im_width = int(data_options['width'])
    im_height = int(data_options['height'])
    fx = float(data_options['fx'])
    fy = float(data_options['fy'])
    u0 = float(data_options['u0'])
    v0 = float(data_options['v0'])

    print("DATA OPTIONS: ", data_options)

    # Parse network and training configuration parameters
    net_options = parse_cfg(modelcfg)[0]
    loss_options = parse_cfg(modelcfg)[-1]
    batch_size = int(net_options['batch'])
    max_batches = int(net_options['max_batches'])
    max_epochs = int(net_options['max_epochs'])
    learning_rate = float(net_options['learning_rate'])
    momentum = float(net_options['momentum'])
    decay = float(net_options['decay'])
    conf_thresh = float(net_options['conf_thresh'])
    num_keypoints = int(net_options['num_keypoints'])
    num_classes = int(loss_options['classes'])
    num_anchors = int(loss_options['num'])
    steps = [float(step) for step in net_options['steps'].split(',')]
    scales = [float(scale) for scale in net_options['scales'].split(',')]
    # anchors       = [float(anchor) for anchor in loss_options['anchors'].split(',')]

    print("NET OPTIONS: ", net_options)
    print("LOSS OPTIONS: ", loss_options)

    # Specifiy the model and the loss
    model = Darknet(modelcfg)

    # # Model settings
    model.load_weights(initweightfile)
    model.print_network()
    # model.seen        = 0
    # processed_batches = model.seen/batch_size
    init_width = 416  # model.width
    init_height = 416  # model.height
    batch_size = 1
    num_workers = 0

    # print("Size: ", init_width, init_height)

    bg_file_names = get_all_files('../VOCdevkit/VOC2012/JPEGImages')
    # Specify the number of workers
    use_cuda = True
    kwargs = {
        'num_workers': num_workers,
        'pin_memory': True
    } if use_cuda else {}

    logger.info("Loading data")

    # valid_dataset = dataset_multi.listDataset("../LINEMOD/duck/test_occlusion.txt", shape=(init_width, init_height),
    #                                             shuffle=False,
    #                                             objclass="duck",
    #                                             transform=transforms.Compose([
    #                                                 transforms.ToTensor(),
    #                                             ]))

    # Get the dataloader for training dataset

    dataloader = torch.utils.data.DataLoader(dataset.listDataset(
        trainlist,
        shape=(init_width, init_height),
        shuffle=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
        ]),
        train=False,
        seen=0,
        batch_size=batch_size,
        num_workers=num_workers,
        bg_file_names=bg_file_names),
                                             batch_size=batch_size,
                                             shuffle=False,
                                             **kwargs)

    model.cuda()
    model.eval()

    delay = {True: 0, False: 1}
    paused = True

    # print("Classes in dataset ", num_classes)
    print("Batches in dataloader: ", len(dataloader))
    tbar = tqdm(dataloader, ascii=True, dynamic_ncols=True)
    for ii, s in enumerate(tbar):
        images, targets = s
        # print(ii, "IMAGES:" , images.shape)
        # print(ii, "TARGET\n", targets.shape)
        bs = images.shape[0]
        t = targets.cpu().numpy().reshape(bs, 50, -1)
        # print("TARGET [0, 0:1] \n", t[0, :1])
        # print("CLASSES ", t[0, :, 0])

        images_gpu = images.cuda()

        model_out = model(images_gpu).detach()
        all_boxes = np.array(
            get_region_boxes(model_out,
                             num_classes,
                             num_keypoints,
                             anchor_dim=num_anchors)).reshape(
                                 batch_size, 1, -1)

        # print("Model OUT", all_boxes.shape)

        pred = np.zeros_like(all_boxes)
        pred[:, 0, 0] = all_boxes[:, 0, -1]
        pred[:, 0, 1:-2] = all_boxes[:, 0, :-3]

        viz = visualize_results(images, t, pred, img_size=416, show_3d=True)

        cv2.imshow("Res ", viz)

        k = cv2.waitKey(delay[paused])
        if k & 0xFF == ord('q'):
            break
        if k & 0xFF == ord('p'):
            paused = not paused
Example #14
    def __init__(self, dataFile, cfgFile, weightFile=None):

        # Training settings
        self.datacfg = dataFile
        self.cfgfile = cfgFile
        self.weightfile = weightFile

        self.data_options = read_data_cfg(self.datacfg)
        self.net_options = parse_cfg(self.cfgfile)[0]

        self.trainlist = self.data_options['train']
        self.testlist = self.data_options['valid']
        self.backupdir = self.data_options['backup']
        self.nsamples = file_lines(self.trainlist)
        self.gpus = self.data_options['gpus']  # e.g. 0,1,2,3
        self.ngpus = len(self.gpus.split(','))
        self.num_workers = int(self.data_options['num_workers'])

        self.batch_size = int(self.net_options['batch'])
        self.max_batches = int(self.net_options['max_batches'])
        self.learning_rate = float(self.net_options['learning_rate'])
        self.momentum = float(self.net_options['momentum'])
        self.decay = float(self.net_options['decay'])
        self.steps = [
            float(step) for step in self.net_options['steps'].split(',')
        ]
        self.scales = [
            float(scale) for scale in self.net_options['scales'].split(',')
        ]

        # Train parameters
        self.max_epochs = self.max_batches * self.batch_size / self.nsamples + 1
        self.use_cuda = True
        self.seed = int(time.time())
        self.eps = 1e-5
        self.save_interval = 1  # epochs
        self.dot_interval = 70  # batches

        self.max_epochs = 200

        # Test parameters
        self.conf_thresh = 0.25
        self.nms_thresh = 0.4
        self.iou_thresh = 0.5

        if not os.path.exists(self.backupdir):
            os.mkdir(self.backupdir)

        ###############
        torch.manual_seed(self.seed)
        if self.use_cuda:
            os.environ['CUDA_VISIBLE_DEVICES'] = self.gpus
            torch.cuda.manual_seed(self.seed)

        self.model = Darknet(parse_cfg(self.cfgfile))
        self.region_loss = self.model.loss

        if self.weightfile is not None:
            self.model.load_weights(self.weightfile)

        self.model.print_network()

        self.region_loss.seen = self.model.seen
        self.processed_batches = self.model.seen / self.batch_size

        self.init_width = self.model.width
        self.init_height = self.model.height
        self.init_epoch = self.model.seen / self.nsamples

        self.kwargs = {
            'num_workers': self.num_workers,
            'pin_memory': True
        } if self.use_cuda else {}
        self.test_loader = torch.utils.data.DataLoader(
            dataset.listDataset(self.testlist,
                                shape=(self.init_width, self.init_height),
                                shuffle=False,
                                transform=transforms.Compose([
                                    transforms.ToTensor(),
                                ]),
                                train=False),
            batch_size=self.batch_size,
            shuffle=False,
            **self.kwargs)

        if self.use_cuda:
            if self.ngpus > 1:
                self.model = torch.nn.DataParallel(self.model).cuda()
            else:
                self.model = self.model.cuda()

        params_dict = dict(self.model.named_parameters())
        params = []
        for key, value in params_dict.items():
            if key.find('.bn') >= 0 or key.find('.bias') >= 0:
                params += [{'params': [value], 'weight_decay': 0.0}]
            else:
                params += [{
                    'params': [value],
                    'weight_decay': self.decay * self.batch_size
                }]

        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.learning_rate / self.batch_size,
                                   momentum=self.momentum,
                                   dampening=0,
                                   weight_decay=self.decay * self.batch_size)
def main():
    ''' simple starter program that can be copied for use when starting a new script. '''
    logging_format = '%(asctime)s %(levelname)s:%(name)s:%(message)s'
    logging_datefmt = '%Y-%m-%d %H:%M:%S'
    logging.basicConfig(level=logging.INFO,
                        format=logging_format,
                        datefmt=logging_datefmt)

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-c', '--config_file', help='input', required=True)
    parser.add_argument(
        '--num_files',
        '-n',
        default=-1,
        type=int,
        help='limit the number of files to process. default is all')
    parser.add_argument(
        '--model_save',
        default='model_saves',
        help='base name of saved model parameters for later loading')
    parser.add_argument(
        '-i',
        '--input_model_pars',
        help=
        'if provided, the file will be used to fill the models state dict from a previous run.'
    )

    parser.add_argument('--debug',
                        dest='debug',
                        default=False,
                        action='store_true',
                        help="Set Logger to DEBUG")
    parser.add_argument('--error',
                        dest='error',
                        default=False,
                        action='store_true',
                        help="Set Logger to ERROR")
    parser.add_argument('--warning',
                        dest='warning',
                        default=False,
                        action='store_true',
                        help="Set Logger to ERROR")
    parser.add_argument('--logfilename',
                        dest='logfilename',
                        default=None,
                        help='if set, logging information will go to file')
    args = parser.parse_args()

    if args.debug and not args.error and not args.warning:
        # remove existing root handlers and reconfigure with DEBUG
        for h in logging.root.handlers:
            logging.root.removeHandler(h)
        logging.basicConfig(level=logging.DEBUG,
                            format=logging_format,
                            datefmt=logging_datefmt,
                            filename=args.logfilename)
        logger.setLevel(logging.DEBUG)
    elif not args.debug and args.error and not args.warning:
        # remove existing root handlers and reconfigure with ERROR
        for h in logging.root.handlers:
            logging.root.removeHandler(h)
        logging.basicConfig(level=logging.ERROR,
                            format=logging_format,
                            datefmt=logging_datefmt,
                            filename=args.logfilename)
        logger.setLevel(logging.ERROR)
    elif not args.debug and not args.error and args.warning:
        # remove existing root handlers and reconfigure with WARNING
        for h in logging.root.handlers:
            logging.root.removeHandler(h)
        logging.basicConfig(level=logging.WARNING,
                            format=logging_format,
                            datefmt=logging_datefmt,
                            filename=args.logfilename)
        logger.setLevel(logging.WARNING)
    else:
        # set to default of INFO
        for h in logging.root.handlers:
            logging.root.removeHandler(h)
        logging.basicConfig(level=logging.INFO,
                            format=logging_format,
                            datefmt=logging_datefmt,
                            filename=args.logfilename)

    blocks = cfg.parse_cfg(args.config_file)
    net = Net(blocks)
    net.double()

    if args.input_model_pars:
        net.load_state_dict(torch.load(args.input_model_pars))

    net_opts = blocks[0]

    trainlist, validlist = get_filelist(net_opts, args)

    trainds = BatchGenerator(trainlist, net_opts)
    batch_size = int(net_opts['batch'])
    validds = BatchGenerator(validlist, net_opts)

    cfg.print_cfg(blocks)

    optimizer = optim.SGD(net.parameters(),
                          lr=float(net_opts['learning_rate']),
                          momentum=float(net_opts['momentum']))

    accuracyCalc = loss_func.ClassOnlyAccuracy()

    batch_time_sum = 0
    batch_time_sum2 = 0
    batch_time_n = 0

    for epoch in range(2):
        logger.info(' epoch %s', epoch)

        batch_counter = 0

        file_indices = np.array(range(len(trainlist)))
        np.random.shuffle(file_indices)

        for file_index in np.nditer(file_indices):

            for batch_index in range(trainds.batches_per_file):

                data = trainds.get_batch(file_index, batch_index)

                start = time.time()
                # logger.info('batch_counter: %s',batch_counter)
                inputs = data['images']
                targets = data['truth']

                # logger.info('retrieved data shape: %s',inputs.shape)
                # logger.info('retrieved truth shape: %s',targets.shape)

                optimizer.zero_grad()
                net.train()
                outputs = net(inputs)

                loss = net.loss(outputs, targets)

                forward = time.time()
                logger.info('forward pass: %6.2f', forward - start)

                loss.backward()
                optimizer.step()

                end = time.time()
                logger.info('backward pass: %6.2f', end - forward)

                timediff = end - start
                batch_time_sum += timediff
                batch_time_sum2 += timediff * timediff
                batch_time_n += 1

                # print statistics
                if True:  #batch_counter % 1 == 0:
                    mean_time = batch_time_sum / batch_time_n / batch_size
                    logger.info('[%3d, %5d] loss: %.3f sec/image: %6.2f',
                                epoch + 1, batch_counter + 1, loss.item(),
                                mean_time)

                if batch_counter % 10 == 9:
                    net.eval()

                    for i in range(1):

                        data = validds.get_next_batch()
                        inputs = data['images']
                        targets = data['truth']
                        outputs = net(inputs)
                        acc = accuracyCalc.eval_acc(outputs, targets)

                        loss = net.loss(outputs, targets)
                        logger.info('valid loss: %10.3f accuracy: %10.3f',
                                    loss.item(), acc)

                batch_counter += 1

                if batch_counter % 10 == 9:
                    torch.save(
                        net.state_dict(),
                        args.model_save + '_%05d_%05d.torch_model_state_dict' %
                        (epoch, batch_counter))
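batch_time_sum2 is accumulated in the loop above but never consumed in this excerpt; presumably it is intended for a timing standard deviation. A hedged helper showing how the two accumulators could be combined:

import math

def batch_time_stats(time_sum, time_sum2, n):
    # Mean and (population) standard deviation of per-batch wall time
    # from the running sums kept in the training loop above.
    mean = time_sum / n
    var = max(time_sum2 / n - mean * mean, 0.0)
    return mean, math.sqrt(var)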
Example #16
                raise NotImplementedError("Image path not recognized!")

        return labpath


if __name__ == '__main__':
    from utils import read_data_cfg
    from cfg import parse_cfg
    from torchvision import transforms

    datacfg = 'cfg/metayolo.data'
    netcfg = 'cfg/dynamic_darknet_last.cfg'
    metacfg = 'cfg/learnet_last.cfg'

    data_options  = read_data_cfg(datacfg)
    net_options   = parse_cfg(netcfg)[0]
    meta_options  = parse_cfg(metacfg)[0]

    cfg.config_data(data_options)
    cfg.config_meta(meta_options)
    cfg.config_net(net_options)
    cfg.num_gpus = 4

    metafiles = 'data/voc_metadict1_full.txt'
    #trainlist = '/data/datasets/PascalVOC/voc_train.txt'

    metaset = MetaDataset(metafiles=metafiles, train=True)
    metaloader = torch.utils.data.DataLoader(
        metaset,
        batch_size=metaset.batch_size,
        shuffle=False,
Example #17
def main():
    datacfg = FLAGS.data
    cfgfile = FLAGS.config
    weightfile = FLAGS.weights

    data_options = read_data_cfg(datacfg)
    net_options = parse_cfg(cfgfile)[0]

    global use_cuda
    use_cuda = torch.cuda.is_available() and (True if use_cuda is None else
                                              use_cuda)

    globals()["trainlist"] = data_options['train']
    globals()["testlist"] = data_options['valid']
    globals()["backupdir"] = data_options['backup']
    globals()["gpus"] = data_options['gpus']  # e.g. 0,1,2,3
    globals()["ngpus"] = len(gpus.split(','))
    globals()["num_workers"] = int(data_options['num_workers'])

    globals()["batch_size"] = int(net_options['batch'])
    globals()["max_batches"] = int(net_options['max_batches'])
    globals()["learning_rate"] = float(net_options['learning_rate'])
    globals()["momentum"] = float(net_options['momentum'])
    globals()["decay"] = float(net_options['decay'])
    globals()["steps"] = [
        float(step) for step in net_options['steps'].split(',')
    ]
    globals()["scales"] = [
        float(scale) for scale in net_options['scales'].split(',')
    ]

    #Train parameters
    global max_epochs
    try:
        max_epochs = int(net_options['max_epochs'])
    except KeyError:
        nsamples = file_lines(trainlist)
        max_epochs = (max_batches * batch_size) // nsamples + 1

    seed = int(time.time())
    torch.manual_seed(seed)
    if use_cuda:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
        torch.cuda.manual_seed(seed)
    global device
    device = torch.device("cuda" if use_cuda else "cpu")

    global model
    model = Darknet(cfgfile, use_cuda=use_cuda)
    model.load_weights(weightfile)
    #model.print_network()

    nsamples = file_lines(trainlist)
    #initialize the model
    if FLAGS.reset:
        model.seen = 0
        init_epoch = 0
    else:
        init_epoch = model.seen // nsamples

    global loss_layers
    loss_layers = model.loss_layers
    for l in loss_layers:
        l.seen = model.seen

    globals()["test_loader"] = load_testlist(testlist)
    if use_cuda:
        if ngpus > 1:
            model = torch.nn.DataParallel(model).to(device)
        else:
            model = model.to(device)

    params_dict = dict(model.named_parameters())
    params = []
    for key, value in params_dict.items():
        if key.find('.bn') >= 0 or key.find('.bias') >= 0:
            params += [{'params': [value], 'weight_decay': 0.0}]
        else:
            params += [{'params': [value], 'weight_decay': decay * batch_size}]
    global optimizer
    optimizer = optim.SGD(model.parameters(),
                          lr=learning_rate / batch_size,
                          momentum=momentum,
                          dampening=0,
                          weight_decay=decay * batch_size)

    if evaluate:
        logging('evaluating ...')
        test(0)
    else:
        try:
            print("Training for ({:d},{:d})".format(init_epoch, max_epochs))
            fscore = 0
            if init_epoch > save_interval:
                mfscore = test(init_epoch - 1)
            else:
                mfscore = 0.5
            for epoch in range(init_epoch, max_epochs):
                nsamples = train(epoch)
                if epoch > save_interval:
                    fscore = test(epoch)
                if (epoch + 1) % save_interval == 0:
                    savemodel(epoch, nsamples)
                if FLAGS.localmax and fscore > mfscore:
                    mfscore = fscore
                    savemodel(epoch, nsamples, True)
                print('-' * 90)
        except KeyboardInterrupt:
            print('=' * 80)
            print('Exiting from training by interrupt')
Example #18
                        default='weights/dynamic_weights.pkl',
                        help='path for dynamic weights')
    parser.add_argument('--conf_thresh',
                        type=float,
                        default=0.2,
                        help='confidence threshold')
    parser.add_argument('--nms_thresh',
                        type=float,
                        default=0.45,
                        help='nms threshold')
    parser.add_argument('--gpu', type=str, default='0', help='gpu to use')

    args = parser.parse_args()

    data_options = read_data_cfg(args.datacfg)
    darknet = parse_cfg(args.darknet)
    learnet = parse_cfg(args.learnet)
    net_options = darknet[0]
    meta_options = learnet[0]
    data_options['gpus'] = args.gpu
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # Configure options
    cfg.config_data(data_options)
    cfg.config_meta(meta_options)
    cfg.config_net(net_options)

    if not os.path.exists(args.img_dest):
        os.makedirs(args.img_dest)

    valid(args.valid_images, darknet, learnet, args.weightfile,
Example #19
from scipy.io import loadmat
from model import YOWO
from utils import *
from eval_results import *

opt = parse_opts()

dataset = opt.dataset
assert dataset == 'ucf101-24' or dataset == 'jhmdb-21', 'invalid dataset'

datacfg       = opt.data_cfg
cfgfile       = opt.cfg_file
gt_file       = 'finalAnnots.mat' # Necessary for ucf

data_options  = read_data_cfg(datacfg)
net_options   = parse_cfg(cfgfile)[0]
loss_options  = parse_cfg(cfgfile)[1]

base_path     = data_options['base']
testlist      = os.path.join(base_path, 'testlist_video.txt') # os.path.join(base_path, 'testlist_video.txt')

clip_duration = int(net_options['clip_duration'])
anchors       = loss_options['anchors'].split(',')
anchors       = [float(i) for i in anchors]
num_anchors   = int(loss_options['num'])
num_classes   = opt.n_classes

# Test parameters
conf_thresh   = 0.005
nms_thresh    = 0.4
eps           = 1e-5
Example #20
from region_loss import RegionLoss
from darknet import Darknet
from models.tiny_yolo import TinyYoloNet

# Training settings
#datacfg       = sys.argv[1]
#cfgfile       = sys.argv[2]
#weightfile    = sys.argv[3]

datacfg = 'cfg/my_voc.data'  # modified accordingly; only one class (the adv patch) is configured
cfgfile = 'cfg/yolo-voc.cfg'
weightfile = 'yolov2-voc.weights'
#weightfile='voc_detect/000040.weights'

data_options = read_data_cfg(datacfg)  # returns the configuration as a dict
net_options = parse_cfg(cfgfile)[0]  # returns all blocks as a list; take only the first block, i.e. the hyperparameter settings

trainlist = data_options['train']
#testlist      = data_options['valid']
backupdir = data_options['backup']
nsamples = file_lines(trainlist)
gpus = data_options['gpus']  # e.g. 0,1,2,3
ngpus = len(gpus.split(','))
num_workers = int(data_options['num_workers'])
print(backupdir)

batch_size = int(net_options['batch'])
max_batches = int(net_options['max_batches'])
learning_rate = float(net_options['learning_rate'])
momentum = float(net_options['momentum'])
decay = float(net_options['decay'])
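The comments above (translated) note that parse_cfg returns every [section] block of the .cfg file as a list of dicts, with the first block holding the hyperparameters. A minimal sketch of a parser with that behavior, stated as an assumption rather than this repo's exact code:

def parse_cfg(cfgfile):
    # Each "[section]" header starts a new block; subsequent "key=value"
    # lines populate it. Blocks are returned in file order.
    blocks, block = [], None
    with open(cfgfile) as fp:
        for line in fp:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            if line.startswith('['):
                if block is not None:
                    blocks.append(block)
                block = {'type': line[1:-1].strip()}
            else:
                key, _, value = line.partition('=')
                block[key.strip()] = value.strip()
    if block is not None:
        blocks.append(block)
    return blocks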
Example #21
    datacfg = args.datacfg
    modelcfg = args.modelcfg
    initweightfile = args.initweightfile
    backupdir = args.backupdir
    pretrain_num_epochs = args.pretrain_num_epochs
    backupdir = args.backupdir
    distiling = bool(args.distiled)

    if distiling:
        distiling_model = Darknet('./models_cfg/tekin/yolo-pose.cfg')
        distiling_model.load_weights('./backup/%s/model_backup.weights' %
                                     (datacfg[14:-5]))

    # Parse configuration files
    data_options = read_data_cfg(datacfg)
    net_options = parse_cfg(modelcfg)[0]
    trainlist = data_options['train']
    testlist = data_options['valid']
    gpus = data_options['gpus']
    meshname = data_options['mesh']
    num_workers = int(data_options['num_workers'])
    diam = float(data_options['diam'])
    vx_threshold = diam * 0.1

    if not os.path.exists(backupdir):
        makedirs(backupdir)
    batch_size = int(net_options['batch'])
    max_batches = int(net_options['max_batches'])
    learning_rate = float(net_options['learning_rate'])
    momentum = float(net_options['momentum'])
    decay = float(net_options['decay'])
    #
    # parser = argparse.ArgumentParser()
    # parser.add_argument('--darknet', type=str, required=True)
    # parser.add_argument('--learnet', type=str, required=True)
    # args = parser.parse_args()

    from utils import read_data_cfg
    from cfg import parse_cfg
    from PIL import Image, ImageDraw

    # datacfg = 'cfg/metayolo.data'
    netcfg = 'cfg/darknet_dynamic.cfg'
    metacfg = 'cfg/reweighting_net.cfg'

    # data_options = read_data_cfg(datacfg)
    darknet_blocks = parse_cfg(netcfg)
    learnet_blocks = parse_cfg(metacfg)

    # cfg.config_data(data_options)
    # cfg.config_meta(meta_options)
    # cfg.config_net(net_options)

    net = DarkMetaNet(darknet_blocks, learnet_blocks)
    net = net.cuda()

    # output: batch_size*num_base_classes, num_anchor*(id+four_position+num_classes_per_anchor), h, w
    # target: batch_size, num_base_classes, num_max_boxes*(id+four_position)
    batch_size = 2
    num_base_classes = 3

    darknet_x = torch.randn(2, 3, 416, 416)