import sys  # FIX: sys.argv is read below, but `import sys` was missing from this chunk

from torchvision import transforms
from torch.autograd import Variable
from core import dataset
from core.utils import *
from core.cfg import parse_cfg
from core.cfg import cfg
from tool.darknet.darknet import Darknet

# Training settings, taken from the command line:
#   argv[1] = data config (.data), argv[2] = network config (.cfg),
#   argv[3] = initial weight file
datacfg = sys.argv[1]
cfgfile = sys.argv[2]
weightfile = sys.argv[3]

data_options = read_data_cfg(datacfg)  # key/value pairs parsed from the .data file
net_options = parse_cfg(cfgfile)[0]    # first cfg section holds the [net] options

trainlist = data_options['train']
testlist = data_options['valid']
# backupdir = data_options['backup']
gpus = data_options['gpus']  # comma-separated device ids, e.g. "0,1,2,3"
ngpus = len(gpus.split(','))
num_workers = int(data_options['num_workers'])

# Solver hyper-parameters from the [net] section of the network cfg.
batch_size = int(net_options['batch'])
max_batches = int(net_options['max_batches'])
learning_rate = float(net_options['learning_rate'])
momentum = float(net_options['momentum'])
decay = float(net_options['decay'])
# LR schedule: at each batch count in `steps`, multiply LR by the matching scale.
steps = [float(step) for step in net_options['steps'].split(',')]
scales = [float(scale) for scale in net_options['scales'].split(',')]
# --- Tail of a (truncated) detection-evaluation routine. The lines preceding
# this chunk are not visible, so the loop context for the next three statements
# (i, j, `box`, `det_conf`, `cls_conf`, x1..y2, `imgid`, `fps`, `n_cls`) is
# established earlier in the file. Formatted flat for readability; the original
# nesting is not recoverable from this view.
cls_id = box[6 + 2 * j]      # presumably the class id interleaved after the box fields — TODO confirm layout of `box`
prob = det_conf * cls_conf   # combined score: detection confidence * class confidence
# One result line per detection: "<image id> <score> <x1> <y1> <x2> <y2>"
fps[i].write('%s %f %f %f %f %f\n' % (imgid, prob, x1, y1, x2, y2))
for i in range(n_cls):
    fps[i].close()  # close every per-class result file
# import pdb; pdb.set_trace()

if __name__ == '__main__':
    import sys
    # Expected CLI:
    #   prog datacfg darknet_cfg learnet_cfg weightfile [gpu] [use_baserw]
    if len(sys.argv) in [5, 6, 7]:
        datacfg = sys.argv[1]
        darknet = parse_cfg(sys.argv[2])  # detector network cfg sections
        learnet = parse_cfg(sys.argv[3])  # reweighting (meta) network cfg sections
        weightfile = sys.argv[4]
        if len(sys.argv) >= 6:
            gpu = sys.argv[5]
        else:
            gpu = '0'  # default device when none is given
        # Presence of a 7th argument (any value) enables the base-reweighting path.
        if len(sys.argv) == 7:
            use_baserw = True
        else:
            use_baserw = False
        data_options = read_data_cfg(datacfg)
        net_options = darknet[0]    # first section = [net] options of the detector cfg
        meta_options = learnet[0]   # first section = [net] options of the meta cfg
        data_options['gpus'] = gpu  # propagate the CLI device choice into the data options
# --- Tail of a (truncated) label-path helper: only its final error branch and
# return are visible in this view.
# NOTE(review): message typo — "note" should read "not". Left unchanged here
# because it is a runtime string; fix it in a behavioral change.
raise NotImplementedError("Image path note recognized!")
return labpath

if __name__ == '__main__':
    # Smoke-test driver: load hard-coded data/net/meta configs, push them into
    # the global cfg object, then build a MetaDataset and a DataLoader over it.
    from core.utils import read_data_cfg
    from core.cfg import parse_cfg
    datacfg = 'cfg/metayolo.data'
    netcfg = 'cfg/dynamic_darknet_last.cfg'
    metacfg = 'cfg/learnet_last.cfg'
    data_options = read_data_cfg(datacfg)
    net_options = parse_cfg(netcfg)[0]    # first section = [net] options
    meta_options = parse_cfg(metacfg)[0]
    # Configure the shared global cfg before any dataset construction.
    cfg.config_data(data_options)
    cfg.config_meta(meta_options)
    cfg.config_net(net_options)
    cfg.num_gpus = 4
    metafiles = 'data/voc_metadict1_full.txt'
    trainlist = '/scratch/bykang/datasets/voc_train.txt'  # NOTE(review): machine-specific absolute path
    metaset = MetaDataset(metafiles=metafiles, train=True)
    metaloader = torch.utils.data.DataLoader(
        metaset,
        batch_size=metaset.batch_size,  # batch size is decided by the dataset itself
        shuffle=False,
        # (call truncated in this view — remaining keyword arguments follow in the file)