Example #1
def init():
    global model, cuda, classes, img_size
    global use_cuda, conf_thres, nms_thres

    try:
        model_path = Model.get_model_path('YOLOv3')
    except Exception:
        # Fall back to a local 'model' directory if the registered model is unavailable
        model_path = 'model'

    config_path = os.path.join(model_path, 'yolov3.cfg')
    class_path = os.path.join(model_path, 'coco.names')
    weights_path = os.path.join(model_path, 'yolov3.weights')
    params_path = os.path.join(model_path, 'params.json')

    # Get parameters and model
    params = load_params(params_path)
    img_size = params['ImageSize']
    conf_thres = params['Confidence']
    nms_thres = params['NonMaxSuppression']
    use_cuda = params['cuda']

    classes = load_classes(class_path)
    model = Darknet(config_path, img_size=img_size)
    model.load_weights(weights_path)

    cuda = torch.cuda.is_available() and use_cuda
    if cuda:
        model.cuda()

    # Set in evaluation mode
    model.eval()
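# Note: load_params(params_path) is not shown in this example; it is assumed
# here to simply read the JSON parameter file into a dict (a minimal sketch,
# not necessarily the repository's actual implementation):
import json

def load_params(params_path):
    with open(params_path) as f:
        return json.load(f)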
def test_detect():
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model = YOLOv5(80).to(device)
    print(load_params(model, "weights/yolov5s_coco.pth", strict=False))
    dataset = LoadImages("./images", 640, 32)
    model.float().fuse().eval().requires_grad_(False)
    half = (device.type != 'cpu')
    # half = False
    if half:
        model.half()
    sum_t = 0.
    # Preprocessing
    x, img0, path = dataset[0]
    x = x.to(device)
    x = x.half() if half else x.float()
    x /= 255
    x = x[None] if x.dim() == 3 else x
    model_info(model, x.shape[-2:])
    for i in range(200):
        t = time.time()
        target = model(x)[0]
        target = nms(target, 0.2, 0.45)
        if i >= 5:  # skip the first 5 iterations as warm-up
            sum_t += time.time() - t
        # Post-processing
        target = target[0]
        convert_boxes(target, x.shape[-2:], img0.shape[:2])
        boxes = target[:, :4].cpu().numpy()
        scores = target[:, 4].cpu().numpy()
        labels = target[:, 5].cpu().numpy()
        img = img0.copy()
        # Draw detections
        draw_target_in_image(img, boxes, labels, scores, "coco")
        img = resize_max(img, 720, 1080)
        cv.imshow("1", img)
        cv.waitKey(0)
    print(sum_t / (200 - 5))  # average time per iteration (seconds)
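# In test_detect(), load_params(model, path, strict=False) is called with a
# different signature than the JSON variant above; it presumably loads a
# checkpoint into the model and returns load_state_dict's report of missing /
# unexpected keys. A sketch under that assumption:
import torch

def load_params(model, weights_path, strict=True):
    state_dict = torch.load(weights_path, map_location='cpu')
    return model.load_state_dict(state_dict, strict=strict)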
TRAIN = 'Avg Train Loss'
VALID = 'Avg Valid Loss'
EPOCH = 'Epoch'
TRAIN_SERIES = 'Train'
VALID_SERIES = 'Valid'
X_LABEL = 'Epoch'
Y_LABEL = 'Average Loss (Duality Gap)'

full_title = 'Average Validation Loss per Epoch'

parser = argparse.ArgumentParser(description='Compare cost files.')
parser.add_argument('--model-params', help='Path to model parameters file.', required=True)

args = parser.parse_args()

model_params = load_params(args.model_params)

after = model_params['after']
after_title = 'Average Validation Loss per Epoch after Epoch {0}'.format(after)

losses_df = pd.DataFrame()
names = []

for model in model_params['models']:
    name, path = model['name'], model['path']
    log_path = os.path.join(path, LOG_FILE)
    df = pd.read_csv(log_path)
    valid_df = pd.DataFrame(df[VALID])
    valid_df = valid_df.rename(mapper={VALID: name}, axis=1)
    losses_df = pd.concat([losses_df, valid_df], axis=1)
    names.append(name)
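# The plotting step is cut off in this fragment; a sketch of how losses_df and
# the labels defined above might be used (the output filenames are
# placeholders, not from the original source):
import matplotlib.pyplot as plt

ax = losses_df.plot()
ax.set_xlabel(X_LABEL)
ax.set_ylabel(Y_LABEL)
ax.set_title(full_title)
ax.get_figure().savefig('valid_losses.png')

ax = losses_df[losses_df.index >= after].plot()
ax.set_xlabel(X_LABEL)
ax.set_ylabel(Y_LABEL)
ax.set_title(after_title)
ax.get_figure().savefig('valid_losses_after.png')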
Example #4
import subprocess
import os
from utils.utils import load_params

params_folder = 'model_params'
datasets_folder = 'datasets'
params = [
    'sf_neighborhood_sparsemax_quadratic_true.json',
    'cambridge_neighborhood_sparsemax_quadratic_true.json'
]

# Ensure that all files exist
for params_file in params:
    params_path = os.path.join(params_folder, params_file)
    assert os.path.exists(params_path)

    params_dict = load_params(params_path)
    assert os.path.exists(
        os.path.join(datasets_folder, params_dict['model']['dataset_name']))

print('Checked all files.')

for params_file in params:
    params_path = os.path.join(params_folder, params_file)
    subprocess.run(['python', 'main.py', '--train', '--params', params_path])
import os
from utils.utils import load_params, get_data
from utils.calculations import calc_rates
from analysis.plot_utils import axis_default, set_aspect
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams

colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
m = -1  # analyze the last pickle file in each subfolder
IDs = ['08-01_01:42:56', '08-01_22:50:10']
for nn in range(5):
    fig, ax = plt.subplots(1, 1)
    for ind, simID in enumerate(IDs):
        if ind == 0:
            noise = 1
        else:
            noise = nn
        params = load_params(simID, path1)
        dir_path = params.path + '/Data/' + simID + '/'
        filenames = sorted(os.listdir(dir_path))
        for file in filenames:
            subfiles = sorted([
                item for item in os.listdir(dir_path + file)
                if item[-3:] == 'pkl'
            ])
            data = [
                pd.read_pickle(dir_path + file + '/' + item)
                for item in subfiles
            ]
            targets = get_data(data[m], 'target', params.offset[0],
                               params.noise[noise])
            lures = get_data(data[m], 'lure', params.offset[0],
                             params.noise[noise])
import matplotlib as mpl

pgf_with_pdflatex = {
    'pgf.texsystem': 'lualatex',
    'pgf.rcfonts': False,
    'font.family': 'serif'
}
mpl.rcParams.update(pgf_with_pdflatex)
mpl.rcParams.update({'errorbar.capsize': 3})
mpl.rcParams.update({'font.size': 14})

COST_FIELD = 'Flow Cost'
LOG_PATH = 'costs.csv'

parser = argparse.ArgumentParser(description='Comparing running times.')
parser.add_argument('--params', help='Path to params file.', required=True)

params = load_params(parser.parse_args().params)
model_folder = params['base_folder']

field = params['fields'][0]

target_path = os.path.join(model_folder, params['target_path'], LOG_PATH)
target_df = pd.read_csv(target_path)

for baseline in params['baselines']:
    baseline_path = os.path.join(model_folder, baseline['path'], LOG_PATH)
    baseline_df = pd.read_csv(baseline_path)

    numeric_baseline = pd.to_numeric(baseline_df[field])
    percent_diff = 100 * (
        (numeric_baseline - target_df[field]) / target_df[field])
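    # The fragment ends here; one possible continuation that summarizes the
    # percent difference per baseline (a sketch, not from the original source):
    print('{0}: mean {1:.2f}%, median {2:.2f}%'.format(
        baseline['path'], percent_diff.mean(), percent_diff.median()))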
Example #7
def main(opts):
    # Create the data loader
    loader = sunnerData.DataLoader(sunnerData.ImageDataset(
        root=[[opts.path]],
        transform=transforms.Compose([
            sunnertransforms.Resize((opts.resolution, opts.resolution)),
            sunnertransforms.ToTensor(),
            sunnertransforms.ToFloat(),
            sunnertransforms.Transpose(sunnertransforms.BHWC2BCHW),
            sunnertransforms.Normalize(),
        ])),
        batch_size=opts.batch_size,
        shuffle=True,
        drop_last=True
    )

    # Create the model
    start_epoch = 0
    G = G_stylegan2(fmap_base=opts.fmap_base,
                    resolution=opts.resolution,
                    mapping_layers=opts.mapping_layers,
                    opts=opts,
                    return_dlatents=True)
    D = D_stylegan2(fmap_base=opts.fmap_base,
                    resolution=opts.resolution,
                    structure='resnet')

    # Load the pre-trained weight
    if os.path.exists(opts.resume):
        INFO("Load the pre-trained weight!")
        state = torch.load(opts.resume)
        G.load_state_dict(state['G'])
        D.load_state_dict(state['D'])
        start_epoch = state['start_epoch']
    else:
        INFO("Pre-trained weight cannot load successfully, train from scratch!")

    # Multi-GPU support
    if torch.cuda.device_count() > 1:
        INFO("Multiple GPU:" + str(torch.cuda.device_count()) + "\t GPUs")
        G = torch.nn.DataParallel(G)
        D = torch.nn.DataParallel(D)
    G.to(opts.device)
    D.to(opts.device)

    # Create the criterion, optimizer and scheduler
    loss_type = 'styleGAN'    # 'Rah' / 'styleGAN' / 'GAN'
    lr_D = 0.003
    lr_G = 0.003
    optim_D = torch.optim.Adam(D.parameters(), lr=lr_D, betas=(0.9, 0.999))
    # g_mapping has 100x lower learning rate
    params_G = [{"params": G.g_synthesis.parameters()},
                {"params": G.g_mapping.parameters(), "lr": lr_G * 0.01}]
    optim_G = torch.optim.Adam(params_G, lr=lr_G, betas=(0.9, 0.999))
    scheduler_D = optim.lr_scheduler.ExponentialLR(optim_D, gamma=0.99)
    scheduler_G = optim.lr_scheduler.ExponentialLR(optim_G, gamma=0.99)

    # Train
    if moving_average:
        avg_param_G = copy_G_params(G)
    fix_z = torch.randn([opts.batch_size, 512]).to(opts.device)
    softplus = torch.nn.Softplus()
    Loss_D_list = [0.0]
    Loss_G_list = [0.0]
    for ep in range(start_epoch, opts.epoch):
        bar = tqdm(loader)
        loss_D_list = []
        loss_G_list = []
        for i, (real_img,) in enumerate(bar):

            real_img = real_img.to(opts.device)
            latents = torch.randn([real_img.size(0), 512]).to(opts.device)

            # =======================================================================================================
            #   (1) Update D network: D_logistic_r1(default)
            # =======================================================================================================
            # Compute adversarial loss toward discriminator
            real_logit = D(real_img)
            fake_img, fake_dlatent = G(latents)
            fake_logit = D(fake_img.detach())

            if loss_type == 'styleGAN':
                d_loss = softplus(fake_logit)
                d_loss = d_loss + softplus(-real_logit)

                # original
                r1_penalty = D_logistic_r1(real_img.detach(), D)
                d_loss = (d_loss + r1_penalty).mean()
                # lite
                # d_loss = d_loss.mean()
            elif loss_type == 'Rah':
                # difference between real and fake:
                r_f_diff = real_logit - torch.mean(fake_logit)

                # difference between fake and real samples
                f_r_diff = fake_logit - torch.mean(real_logit)

                d_loss = (torch.mean(torch.nn.ReLU()(1 - r_f_diff))
                          + torch.mean(torch.nn.ReLU()(1 + f_r_diff)))
            elif loss_type == 'GAN':
                criterion = torch.nn.BCEWithLogitsLoss()
                d_loss = (criterion(real_logit.squeeze(), torch.ones(real_img.size(0)).to(opts.device))
                          + criterion(fake_logit.squeeze(), torch.zeros(fake_img.size(0)).to(opts.device)))

            else:
                print("Loss type not exist!")
                exit()

            loss_D_list.append(d_loss.mean().item())

            # Update discriminator
            optim_D.zero_grad()
            d_loss.backward()
            optim_D.step()

            # =======================================================================================================
            #   (2) Update G network: G_logistic_ns_pathreg(default)
            # =======================================================================================================
            # if i % CRITIC_ITER == 0:
            G.zero_grad()
            fake_scores_out = D(fake_img)
            if loss_type == 'styleGAN':
                _g_loss = softplus(-fake_scores_out)

                # Compute |J*y|.
                # pl_noise = (torch.randn(fake_img.shape) / np.sqrt(fake_img.shape[2] * fake_img.shape[3])).to(fake_img.device)
                # pl_grads = grad(torch.sum(fake_img * pl_noise), fake_dlatent, retain_graph=True)[0]
                # pl_lengths = torch.sqrt(torch.sum(torch.sum(torch.mul(pl_grads, pl_grads), dim=2), dim=1))
                # pl_mean = PL_DECAY * torch.sum(pl_lengths)
                #
                # pl_penalty = torch.mul(pl_lengths - pl_mean, pl_lengths - pl_mean)
                # reg = pl_penalty * PL_WEIGHT
                #
                # # original
                # g_loss = (_g_loss + reg).mean()
                # lite
                g_loss = _g_loss.mean()

            elif loss_type == 'Rah':
                real_scores_out = D(real_img)
                # difference between real and fake:
                r_f_diff = real_scores_out - torch.mean(fake_scores_out)

                # difference between fake and real samples
                f_r_diff = fake_scores_out - torch.mean(real_scores_out)

                # return the loss
                g_loss = (torch.mean(torch.nn.ReLU()(1 + r_f_diff))
                          + torch.mean(torch.nn.ReLU()(1 - f_r_diff)))
            elif loss_type == 'GAN':
                criterion = torch.nn.BCEWithLogitsLoss()
                g_loss = criterion(fake_scores_out.squeeze(), torch.ones(fake_img.size(0)).to(opts.device))
            else:
                print("Loss type not exist!")
                exit()
            loss_G_list.append(g_loss.mean().item())

            # Update generator
            g_loss.backward(retain_graph=True)
            optim_G.step()

            # Output training stats
            bar.set_description(
                "Epoch {} [{}, {}] [G]: {} [D]: {}".format(ep, i + 1, len(loader), loss_G_list[-1], loss_D_list[-1]))
            if moving_average:
                for p, avg_p in zip(G.parameters(), avg_param_G):
                    avg_p.mul_(0.999).add_(p.data, alpha=0.001)

        # Save the result
        Loss_G_list.append(np.mean(loss_G_list))
        Loss_D_list.append(np.mean(loss_D_list))

        # Save model
        state = {
            'G': G.state_dict(),
            'D': D.state_dict(),
            'Loss_G': Loss_G_list,
            'Loss_D': Loss_D_list,
            'start_epoch': ep,
        }
        torch.save(state, os.path.join(opts.det, 'models', 'all_model_epoch_%d.pth' % (ep)))

        # Check how the generator is doing by saving G's output on fixed_noise
        if moving_average:
            backup_para = copy_G_params(G)
            load_params(G, avg_param_G)
        with torch.no_grad():
            fake_img = G(fix_z)[0].detach().cpu()
            save_image(fake_img, os.path.join(opts.det, 'images', str(ep) + '.png'), nrow=5, normalize=True)
        # Save avg_G model
        torch.save(G.state_dict(), os.path.join(opts.det, 'models', 'Avg_G_epoch_%d.pth' % (ep)))

        if moving_average:
            load_params(G, backup_para)

        scheduler_D.step()
        scheduler_G.step()

    # Plot the total loss curve
    Loss_D_list = Loss_D_list[1:]
    Loss_G_list = Loss_G_list[1:]
    plotLossCurve(opts, Loss_D_list, Loss_G_list)
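# The moving-average helpers are not shown in this example. A sketch of what
# copy_G_params and load_params(G, params) are assumed to do here (note that
# this load_params operates on a list of parameter tensors, not a file):
def copy_G_params(model):
    # Detached copy of every generator parameter, used as the running average.
    return [p.detach().clone() for p in model.parameters()]

def load_params(model, new_params):
    # Copy the (averaged) parameters back into the model in place.
    for p, new_p in zip(model.parameters(), new_params):
        p.data.copy_(new_p.data)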
Example #8
            if len(bolded) > 1:
                lines.append(' & '.join(bolded) + '\\\\')
            start += 1

    # Fix issue with line headers
    lines[2] = lines[3].replace('\\\\', ' ') + lines[2][1:]
    lines.pop(3)
    return '\n'.join(lines)


parser = argparse.ArgumentParser(description='Compare cost files.')
parser.add_argument('--params', help='Parameters JSON file.', required=True)

args = parser.parse_args()

params = load_params(params_file_path=args.params)

model_folder = params['base_folder']

# Load target dataset into Pandas
costs_file = COSTS_FILE
if 'target_optimizer' in params:
    costs_file = 'costs-{0}.csv'.format(params['target_optimizer'])

target_file = os.path.join(model_folder, params['target_path'], costs_file)
target_df = pd.read_csv(target_file)

output_folder = os.path.join(OUTPUT_BASE, params['output_folder'])
if not os.path.exists(output_folder):
    os.mkdir(output_folder)
Example #9
def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Computing Min Cost Flows using Graph Neural Networks.')
    parser.add_argument('--params', type=str, help='Parameters JSON file.')
    parser.add_argument('--train',
                        action='store_true',
                        help='Flag to specify training.')
    parser.add_argument('--generate',
                        action='store_true',
                        help='Flag to specify dataset generation.')
    parser.add_argument('--test',
                        action='store_true',
                        help='Flag to specify testing.')
    parser.add_argument('--slsqp',
                        action='store_true',
                        help='Flag to specify using SLSQP baseline.')
    parser.add_argument(
        '--trust-constr',
        action='store_true',
        help='Flag to specify using Trust Constraint baseline.')
    parser.add_argument(
        '--fixed',
        action='store_true',
        help='Flag to specify using the Fixed Proportions baseline.')
    parser.add_argument('--view-params',
                        action='store_true',
                        help='Flag to specify viewing model parameters.')
    parser.add_argument('--graph-stats', action='store_true')
    parser.add_argument('--random-walks',
                        action='store_true',
                        help='Flag to specify computing random walks.')
    parser.add_argument('--model', type=str, help='Path to trained model.')
    args = parser.parse_args()

    # Fetch parameters
    if args.params is not None:
        params = load_params(args.params)
    else:
        # Load parameters used to create the given model
        params = restore_params(args.model)

    model_params = params['model'] if 'model' in params else params

    if args.train:
        mcf_solver = FlowModelRunner(params=model_params)
        mcf_solver.train()
    elif args.generate:
        generate(params['generate'])
    elif args.test:
        if args.slsqp:
            model_params['optimizer'] = {
                'use_optimizer': True,
                'optimizer_name': 'slsqp'
            }
        elif args.trust_constr:
            model_params['optimizer'] = {
                'use_optimizer': True,
                'optimizer_name': 'trust_constr'
            }
        mcf_solver = FlowModelRunner(params=model_params)
        mcf_solver.test(args.model)
    elif args.random_walks:
        random_walks(params['generate']['graph_names'][0],
                     params['model']['unique_neighborhoods'])
    elif args.graph_stats:
        graph_stats(params['generate']['graph_names'][0])
    elif args.trust_constr:
        baseline = OptimizationBaseline(params=model_params,
                                        optimizer_name='trust_constr')
        baseline.optimize()
    elif args.slsqp:
        baseline = OptimizationBaseline(params=model_params,
                                        optimizer_name='slsqp')
        baseline.optimize()
    elif args.fixed:
        baseline = FixedBaseline(params=model_params)
        baseline.test(model_path=None)
    elif args.view_params:
        print(json.dumps(params, indent=2, sort_keys=True))
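# Typical invocations of this entry point, based on the flags defined above
# (the parameter and model paths are placeholders):
#   python main.py --generate --params model_params/flow.json
#   python main.py --train --params model_params/flow.json
#   python main.py --test --slsqp --params model_params/flow.json --model <path to trained model>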
def test_test():
    img_path_list, target_list = \
        make_dataset(r"D:\datasets\VOCdevkit\VOC0712\JPEGImages",
                     r"D:\datasets\VOCdevkit\VOC0712\Annotations",
                     r"D:\datasets\VOCdevkit\VOC0712\pkl\voc_0712_test.pkl",
                     r"D:\datasets\VOCdevkit\VOC0712\ImageSets\Main\test.txt", "voc", False)

    dataset = LoadImagesAndLabels(img_path_list, target_list, 640, 32, 0.5,
                                  False, {"batch_size": 1})

    # x: Tensor[C, H, W], target_out: Tensor[X, 6], path: str. [idx, cls, *xywh]
    # test show
    def test1():
        for x, target, img_path in dataset:
            print(x.shape, target, img_path)
            x = x.numpy()
            x = x.transpose(1, 2, 0)[:, :, ::-1]  # to (H, W, C), RGB to BGR,
            x = np.ascontiguousarray(x)
            h, w = x.shape[:2]
            boxes = target[:, 2:].numpy()
            labels = target[:, 1].numpy()
            boxes = cxcywh2ltrb(boxes)
            boxes[:, 0::2] *= w  # scale x coords (left/right) to pixels
            boxes[:, 1::2] *= h  # scale y coords (top/bottom) to pixels
            draw_target_in_image(x, boxes, labels, None, "voc")
            cv.imshow("1", x)
            cv.waitKey(0)

    # test1()
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model = YOLOv5(20).to(device)
    print(load_params(model, "weights/yolov5s_voc.pth", strict=False))
    model.float().fuse().eval().requires_grad_(False)
    half = (device.type != 'cpu')
    if half:
        model.half()
    hyp = {
        "obj_pw": 0.911,
        "cls_pw": 0.631,
        "anchor_t": 2.91,
        "box_lw": 0.0296,
        "obj_lw": 0.301,
        "cls_lw": 0.06075
    }
    loss_func = Loss(model, hyp)
    loss = torch.zeros((4, ), device=device)
    for x, target0, img_path in reversed(dataset):
        img0 = x.numpy()
        img0 = img0.transpose(1, 2, 0)[:, :, ::-1]  # to (H, W, C), RGB to BGR,
        img0 = np.ascontiguousarray(img0)
        img = img0.copy()
        # Preprocessing
        x, target0 = x.to(device), target0.to(device)
        x = x.half() if half else x.float()
        x /= 255
        if x.dim() == 3:
            x = x[None]
        # Prediction
        target, loss_target = model(x)
        loss += loss_func([loss_t.float() for loss_t in loss_target],
                          target0)[1]
        target = nms(target, 0.001, 0.6)

        # Post-processing
        # 1. predictions
        target = target[0]
        boxes = target[:, :4].cpu().numpy()
        scores = target[:, 4].cpu().numpy()
        labels = target[:, 5].cpu().numpy()
        draw_target_in_image(img, boxes, labels, scores, "voc")
        cv.imshow("pred", img)
        # 2. ground-truth targets
        img2 = img0.copy()
        h, w = img2.shape[:2]
        boxes = target0[:, 2:].cpu().numpy()
        labels = target0[:, 1].cpu().numpy()
        boxes = cxcywh2ltrb(boxes)
        boxes[:, 0::2] *= w  # scale x coords (left/right) to pixels
        boxes[:, 1::2] *= h  # scale y coords (top/bottom) to pixels
        draw_target_in_image(img2, boxes, labels, None, "voc")
        cv.imshow("target", img2)
        cv.waitKey(0)
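# cxcywh2ltrb is not shown in these examples; it is assumed to convert boxes
# from (center-x, center-y, width, height) to (left, top, right, bottom).
# A minimal sketch under that assumption:
import numpy as np

def cxcywh2ltrb(boxes):
    # boxes: ndarray[N, 4] in (cx, cy, w, h) order
    out = np.empty_like(boxes)
    out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2  # left
    out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2  # top
    out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2  # right
    out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2  # bottom
    return out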