Exemplo n.º 1
0
def train(train_queue, model, criterion, optimizer, epoch_str):
    """Run one training epoch; return (avg top-1 accuracy, avg loss).

    Args:
        train_queue: iterable of (input, target) batches (e.g. a DataLoader).
        model: network being trained; batches are moved to CUDA before use.
        criterion: loss function applied to (logits, target).
        optimizer: optimizer stepping the model parameters.
        epoch_str: label used only in log messages.
    """
    loss_meter = util.AvgrageMeter()
    top1 = util.AvgrageMeter()
    top5 = util.AvgrageMeter()
    model.train()

    length = len(train_queue)
    for step, (input_data, target) in enumerate(train_queue):
        input_data = input_data.cuda()
        target = target.cuda()

        optimizer.zero_grad()
        res = model(input_data)
        loss = criterion(res, target)
        loss.backward()
        # clip_grad_norm was deprecated in PyTorch; clip_grad_norm_ is the
        # supported in-place replacement with identical clipping semantics.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        prec1, prec5 = util.accuracy(res, target, top_k=(1, 5))
        n = input_data.size(0)
        loss_meter.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)

        if step % args.report_freq == 0:
            logging.info(
                'train - epoch:{:}\tbatch:[{:03d}/{:03d}]\tavg_loss:{:.6f}\ttop1_acc:{:.2f}%\ttop5_acc:{:.2f}%'
                .format(epoch_str, step, length, loss_meter.avg, top1.avg,
                        top5.avg))

    return top1.avg, loss_meter.avg
Exemplo n.º 2
0
def infer(valid_queue, model, criterion, epoch_str):
    """Evaluate on the validation queue; return (avg top-1 accuracy, avg loss).

    Runs under torch.no_grad() with the model in eval mode, logging running
    averages every args.report_freq batches.
    """
    avg_loss = util.AvgrageMeter()
    acc_top1 = util.AvgrageMeter()
    acc_top5 = util.AvgrageMeter()
    model.eval()
    num_batches = len(valid_queue)
    with torch.no_grad():
        for step, (input_data, target) in enumerate(valid_queue):
            input_data, target = input_data.cuda(), target.cuda()

            res = model(input_data)
            loss = criterion(res, target)

            prec1, prec5 = util.accuracy(res, target, top_k=(1, 5))
            batch_size = input_data.size(0)
            avg_loss.update(loss.item(), batch_size)
            acc_top1.update(prec1.item(), batch_size)
            acc_top5.update(prec5.item(), batch_size)

            if step % args.report_freq == 0:
                logging.info(
                    'valid - epoch:{:}\tbatch:[{:03d}/{:03d}]\tavg_loss:{:.6f}\ttop1_acc:{:.2f}%\ttop5_acc:{:.2f}%'
                    .format(epoch_str, step, num_batches, avg_loss.avg,
                            acc_top1.avg, acc_top5.avg))
    return acc_top1.avg, avg_loss.avg
Exemplo n.º 3
0
def train(train_queue, valid_queue, model, architect, criterion, optimizer,
          lr):
    """Run one architecture-search epoch (DARTS-style), alternating an
    architecture update with a weight update per batch.

    Args:
        train_queue: loader of (input, target) batches for the weight step.
        valid_queue: loader sampled for the architecture (alpha) step.
        model: supernet being searched.
        architect: object performing the architecture-parameter update.
        criterion: segmentation loss on (logits, target).
        optimizer: optimizer over the model weights.
        lr: current learning rate, forwarded to architect.step.

    Returns:
        (avg pixel accuracy, avg loss, avg F-score, avg mIoU) for the epoch.
    """
    objs = utils.AvgrageMeter()  # running average of the loss
    accs = utils.AvgrageMeter()
    MIoUs = utils.AvgrageMeter()
    fscores = utils.AvgrageMeter()

    if args.gpu == -1:
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:{}'.format(args.gpu))

    # Each step consumes one training batch.
    for step, (input, target) in enumerate(train_queue):
        model.train()
        n = input.size(0)

        input = input.to(device)
        target = target.to(device)

        # Get a random minibatch from the search queue with replacement:
        # iter() restarts the loader, so a shuffled loader yields a fresh
        # first batch each step.
        input_search, target_search = next(iter(valid_queue))
        input_search = input_search.to(device)
        target_search = target_search.to(device)

        architect.step(input,
                       target,
                       input_search,
                       target_search,
                       lr,
                       optimizer,
                       unrolled=args.unrolled)

        optimizer.zero_grad()
        logits = model(input)
        logits = logits.to(device)
        loss = criterion(logits, target)
        evaluater = Evaluator(dataset_classes)
        loss.backward()
        # clip_grad_norm was deprecated; clip_grad_norm_ is the in-place
        # replacement with identical semantics.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        evaluater.add_batch(target, logits)
        miou = evaluater.Mean_Intersection_over_Union()
        fscore = evaluater.Fx_Score()
        acc = evaluater.Pixel_Accuracy()

        objs.update(loss.item(), n)
        MIoUs.update(miou.item(), n)
        fscores.update(fscore.item(), n)
        accs.update(acc.item(), n)

        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f %f', step, objs.avg, accs.avg,
                         fscores.avg, MIoUs.avg)

    return accs.avg, objs.avg, fscores.avg, MIoUs.avg
Exemplo n.º 4
0
def train(data, model, optimizer, epoch, device, log_dir):
    """Train the graph VAE for one epoch; return the average total loss.

    Args:
        data: object exposing a ``train_dataloader`` of graph batches.
        model: callable returning (loss, recon_loss, kl_loss) per batch.
        optimizer: optimizer over the model parameters.
        epoch: current epoch index, recorded in the loss log.
        device: device graph batches are moved to before the forward pass.
        log_dir: directory whose ``loss.txt`` receives one JSON line per step.
    """
    objs = util.AvgrageMeter()
    recon_objs = util.AvgrageMeter()
    kl_objs = util.AvgrageMeter()

    model.train()

    for step, graph_batch in enumerate(data.train_dataloader):
        for i in range(len(graph_batch)):
            graph_batch[i].to(device)
        loss, recon_loss, kl_loss = model(graph_batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        n = graph_batch[0].num_graphs
        # .data is legacy autograd API; .item() alone is the supported way
        # to extract a Python scalar from a tensor.
        objs.update(loss.item(), n)
        recon_objs.update(recon_loss.item(), n)
        kl_objs.update(kl_loss.item(), n)

        config_dict = {
            'epoch': epoch,
            'recon_loss': recon_objs.avg,
            'kl_loss': kl_objs.avg,
            'loss': objs.avg,
        }

        # Re-opened in append mode every step so each record is flushed and
        # survives an interrupted epoch.
        with open(os.path.join(log_dir, 'loss.txt'), 'a') as file:
            json.dump(str(config_dict), file)
            file.write('\n')

    logging.info('train %03d %.5f', step, objs.avg)

    return objs.avg
Exemplo n.º 5
0
def train(train_data, model, criterion, optimizer, epoch, device, alpha,
          data_config, log_dir):
    """Jointly train the VAE and the accuracy predictor for one epoch.

    Total loss per batch is ``alpha * vae_loss + (1 - alpha) * acc_loss``.
    The regression target is ``test_acc`` when ``args.test`` is set,
    otherwise ``acc``.

    Args:
        train_data: dataset wrapped in a fresh shuffled DataLoader each call.
        model: returns (vae_loss, recon_loss, kl_loss, pred) per graph batch.
        criterion: regression loss between predictions and accuracies.
        optimizer: optimizer over the model parameters.
        epoch: current epoch index, recorded in the loss log.
        device: device graph batches are moved to.
        alpha: weight in [0, 1] mixing VAE loss against prediction loss.
        data_config: dict with 'num_workers' and 'batch_size'.
        log_dir: directory whose ``loss.txt`` receives one summary line.

    Returns:
        (average total loss, regression metrics over all predictions).
    """
    objs = util.AvgrageMeter()
    vae_objs = util.AvgrageMeter()
    acc_objs = util.AvgrageMeter()
    preds = []
    targets = []

    model.train()

    data_loader = DataLoader(train_data,
                             shuffle=True,
                             num_workers=data_config['num_workers'],
                             pin_memory=True,
                             batch_size=data_config['batch_size'])
    for step, graph_batch in enumerate(data_loader):
        for i in range(len(graph_batch)):
            graph_batch[i].to(device)
        vae_loss, recon_loss, kl_loss, pred = model(graph_batch)
        pred = pred.view(-1)  # flatten once; the original re-flattened below
        # Select the regression target once instead of branching twice on
        # args.test (loss computation and metric collection stayed in sync
        # only by duplication before).
        if args.test:
            target = graph_batch[0].test_acc
        else:
            target = graph_batch[0].acc
        acc_loss = criterion(pred, target)
        loss = alpha * vae_loss + (1 - alpha) * acc_loss

        preds.extend(pred.detach().cpu().numpy())
        targets.extend(target.detach().cpu().numpy())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        n = graph_batch[0].num_graphs
        # .data is legacy autograd API; .item() is the supported accessor.
        objs.update(loss.item(), n)
        vae_objs.update(vae_loss.item(), n)
        acc_objs.update(acc_loss.item(), n)

    config_dict = {
        'epoch': epoch,
        'vae_loss': vae_objs.avg,
        'acc_loss': acc_objs.avg,
        'loss': objs.avg,
    }

    with open(os.path.join(log_dir, 'loss.txt'), 'a') as file:
        json.dump(str(config_dict), file)
        file.write('\n')

    logging.info('train %03d %.5f', step, objs.avg)
    train_results = util.evaluate_metrics(np.array(targets),
                                          np.array(preds),
                                          prediction_is_first_arg=False)
    logging.info('train metrics:  %s', train_results)
    return objs.avg, train_results
Exemplo n.º 6
0
def infer(test_queue, model, criterion):
    """Evaluate on the test queue, saving per-batch predictions to disk.

    Returns:
        (avg pixel accuracy, avg loss, avg F-score, avg mIoU).
    """
    objs = util.AvgrageMeter()
    accs = util.AvgrageMeter()
    MIoUs = util.AvgrageMeter()
    fscores = util.AvgrageMeter()
    model.eval()

    if args.gpu == -1:
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:{}'.format(args.gpu))

    # Predictions go next to the checkpoint: the last 10 characters of
    # model_path are replaced by 'predict'.
    save_path = args.model_path[:-10] + 'predict'
    print(save_path)
    if not os.path.exists(save_path):
        os.mkdir(save_path)

    # no_grad: the original built autograd graphs for every inference batch,
    # wasting memory; the computed outputs are unchanged.
    with torch.no_grad():
        for step, (input, target, data_list) in enumerate(test_queue):
            input = input.to(device)
            target = target.to(device)
            n = input.size(0)

            logits = model(input)
            util.save_pred_WHU(logits, save_path, data_list)

            loss = criterion(logits, target)
            evaluater = Evaluator(dataset_classes)

            evaluater.add_batch(target, logits)
            miou = evaluater.Mean_Intersection_over_Union()
            fscore = evaluater.Fx_Score()
            acc = evaluater.Pixel_Accuracy()

            objs.update(loss.item(), n)
            MIoUs.update(miou.item(), n)
            fscores.update(fscore.item(), n)
            accs.update(acc.item(), n)

            if step % args.report_freq == 0:
                logging.info('test %03d %e %f %f %f', step, objs.avg,
                             accs.avg, fscores.avg, MIoUs.avg)

    return accs.avg, objs.avg, fscores.avg, MIoUs.avg
Exemplo n.º 7
0
def infer(valid_queue, model, criterion):
    """Evaluate the model on the validation queue.

    Returns:
        (avg pixel accuracy, avg loss, avg F-score, avg mIoU).
    """
    objs = utils.AvgrageMeter()
    accs = utils.AvgrageMeter()
    MIoUs = utils.AvgrageMeter()
    fscores = utils.AvgrageMeter()
    model.eval()

    if args.gpu == -1:
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:{}'.format(args.gpu))

    # no_grad: the original tracked gradients during evaluation, wasting
    # memory; the computed metrics are unchanged.
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.to(device)
            target = target.to(device)

            logits = model(input)
            loss = criterion(logits, target)
            evaluater = Evaluator(dataset_classes)

            evaluater.add_batch(target, logits)
            miou = evaluater.Mean_Intersection_over_Union()
            fscore = evaluater.Fx_Score()
            acc = evaluater.Pixel_Accuracy()

            n = input.size(0)

            objs.update(loss.item(), n)
            MIoUs.update(miou.item(), n)
            fscores.update(fscore.item(), n)
            accs.update(acc.item(), n)

            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f %f', step, objs.avg,
                             accs.avg, fscores.avg, MIoUs.avg)

    return accs.avg, objs.avg, fscores.avg, MIoUs.avg
Exemplo n.º 8
0
def infer(valid_queue, model, criterion):
  # NOTE(review): this snippet appears truncated by the extraction -- only the
  # loss meter is initialised and the function returns None; the evaluation
  # loop that should follow is missing. Confirm against the original source.
  objs = utils.AvgrageMeter()
Exemplo n.º 9
0
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr):
  # NOTE(review): snippet appears truncated by the extraction -- only the loss
  # meter is initialised; the training loop that should follow is missing.
  objs = utils.AvgrageMeter()# running average of the loss
Exemplo n.º 10
0
    print(F.softmax(model.alphas_reduce, dim=-1))

    # training
    train_acc, train_obj, train_fscores, train_MIoU = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr)
    logging.info('train_acc %f _fscores %f _MIoU %f', train_acc, train_fscores, train_MIoU)

    # validation
    valid_acc, valid_obj, valid_fscores, valid_MIoU = infer(valid_queue, model, criterion)
    logging.info('valid_acc %f _fcores %f _MIoU %f', valid_acc, valid_fscores, valid_MIoU)

    utils.save(model, os.path.join(args.save, 'weights.pt'))
    f_arch.write(str(F.softmax(model.arch_parameters()[0],-1)))
  f_arch.close()
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr):
  objs = utils.AvgrageMeter()# 用于保存loss的值
  accs = utils.AvgrageMeter()
  MIoUs = utils.AvgrageMeter()
  fscores = utils.AvgrageMeter()

  # device = torch.device('cuda' if torch.cuda.is_avaitargetsle() else 'cpu')
  if args.gpu == -1:
    device = torch.device('cpu')
  else:
    device = torch.device('cuda:{}'.format(args.gpu))

  for step, (input, target) in enumerate(train_queue):#每个step取出一个batch,batchsize是64(256个数据对)
    model.train()
    n = input.size(0)

    input = input.to(device)
    target = target.to(device)