Example #1
def summarize_probability(prob_dict, action_to_idx, title):
    acc = [AverageMeter(), AverageMeter()]
    stat = {}
    for vname, item in tqdm(prob_dict.items(), total=len(prob_dict)):
        try:
            action_name = vname.split('/')[-3]
        except IndexError:
            action_name = vname.split('/')[-2]
        target = action_to_idx(action_name)
        mean_prob = torch.stack(item['mean_prob'], 0).mean(0)
        mean_top1, mean_top5 = calc_topk_accuracy(mean_prob, torch.LongTensor([target]).cuda(), (1,5))
        stat[vname] = {'mean_prob': mean_prob.tolist()}
        acc[0].update(mean_top1.item(), 1)
        acc[1].update(mean_top5.item(), 1)

    print('Mean: Acc@1: {acc[0].avg:.4f} Acc@5: {acc[1].avg:.4f}'
          .format(acc=acc))

    with open(os.path.join(os.path.dirname(args.test), 
        '%s-prob-%s.json' % (os.path.basename(args.test), title)), 'w') as fp:
        json.dump(stat, fp)
    return acc
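Note: every example on this page assumes an AverageMeter helper exposing val, avg, and update(value, n=1). Below is a minimal sketch consistent with that usage; each repo quoted here ships its own variant, so treat it as illustrative rather than their exact code.

class AverageMeter:
    """Tracks the latest value and a running, count-weighted average."""

    def __init__(self):
        self.val = 0.0   # most recent value passed to update()
        self.avg = 0.0   # running average over everything seen so far
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        # n is usually the batch size, so the average is weighted per sample
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count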
Example #2
def train(train_loader, model, criterion, optimizer, epoch, print_freq=1000):
    # amp_handle = amp.init()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    model = model.train()
    end = time.time()
    print("Start Training")
    epoch_time = time.time()
    for i, (imgs, caps, lengths) in enumerate(train_loader):
        print("%2.2f" % (i / len(train_loader) * 100), "\%", end="\r")
        data_time.update(time.time() - end)

        optimizer.zero_grad()
        output_imgs, output_caps = model(imgs, caps, lengths)
        loss = criterion(output_imgs, output_caps)

        loss.backward()
        optimizer.step()
        losses.update(loss.item(), imgs.size(0))

        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0 or i == (len(train_loader) - 1):
            print("Epoch: [{0}][{1}/{2}]\t"
                  "Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
                  "Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
                  "Loss {loss.val:.4f} ({loss.avg:.4f})".format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                  ))
    print("Epoch time :", time.time() - epoch_time)
    return losses.avg, batch_time.avg, data_time.avg
Example #3
def do_eval(opt, epoch, model, DatasetFactory, logger, best):
    # Based this code on test.py's non-prefetched code path:
    Detector = detector_factory[opt.task]
    dataset = DatasetFactory(opt, "val")
    detector = Detector(opt, model)
    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])

        if opt.task == 'ddd':
            ret = detector.run(img_path, img_info['calib'])
        else:
            ret = detector.run(img_path)

        results[img_id] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
                        ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
        bar.next()
    bar.finish()
    metric = float("-inf")
    # Capture metric of interest, e.g., for COCO eval, something like AP50:
    eval_stats = dataset.run_eval(results, opt.save_dir, logger)
    if uses_coco_eval(opt):
        ap50 = eval_stats[1]
        ap25 = eval_stats[12]
        metric = ap25
        # Log results to log.txt and/or tensorboard:
        logger.scalar_summary("val_ap50", ap50, epoch)
        logger.scalar_summary("val_ap25", ap25, epoch)
    else:
        # Pascal VOC:
        metric = eval_stats["Mean AP"]
        # Log results to log.txt and/or tensorboard:
        logger.scalar_summary("mean_AP", metric, epoch)

    # Best model checkpointing:
    if metric > best:
        best = metric
        save_model(
            os.path.join(opt.save_dir, "model_best.pth"), epoch, model
        )
    return best
Example #4
def eval_one_epoch(model, loader, criterion, epoch, logger, args):

    losses = AverageMeter("Loss")
    y_true, y_pred = [], []
    model.eval()

    with torch.no_grad():
        for batch_idx, (data, target, idx) in enumerate(loader):
            data = data.cuda()
            target = target.cuda()

            z, logits = model(data)
            loss = criterion(logits, target.view(-1))
            losses.update(loss.item(), data.shape[0])

            probabilities = nn.Softmax(dim=1)(logits)
            _, predictions = torch.max(probabilities, 1)

            y_pred.append(predictions.cpu().numpy().reshape(-1))
            y_true.append(target.cpu().numpy().reshape(-1))

    # append invalid samples at the beginning of the test sequence
    if loader.dataset.prefix == "test":
        ws = data.shape[1] - 1
        samples_invalid = [y_true[0][0]] * ws
        y_true.append(samples_invalid)
        y_pred.append(samples_invalid)

    y_true = np.concatenate(y_true, 0)
    y_pred = np.concatenate(y_pred, 0)

    acc = 100.0 * metrics.accuracy_score(y_true, y_pred)
    fm = 100.0 * metrics.f1_score(y_true, y_pred, average="macro")
    fw = 100.0 * metrics.f1_score(y_true, y_pred, average="weighted")

    if logger:
        logger.add_scalars("Loss", {"CrossEntropy": losses.avg}, epoch)
        logger.add_scalar("Acc", acc, epoch)
        logger.add_scalar("Fm", fm, epoch)
        logger.add_scalar("Fw", fw, epoch)

    if epoch % 50 == 0 or not args.train_mode:
        plot_confusion(
            y_true,
            y_pred,
            os.path.join(model.path_visuals, f"cm/{loader.dataset.prefix}"),
            epoch,
            class_map=args.class_map,
        )

    return losses.avg, acc, fm, fw
Example #5
def test(opt):
    os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus_str

    Dataset = dataset_factory[opt.test_dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)

    split = "val" if not opt.trainval else "test"
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    if opt.load_results != "":  # load results in json
        load_results = json.load(open(opt.load_results, "r"))

    results = {}
    num_iters = len(dataset) if opt.num_iters < 0 else opt.num_iters
    bar = Bar("{}".format(opt.exp_id), max=num_iters)
    time_stats = ["tot", "load", "pre", "net", "dec", "post", "merge"]
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info["file_name"])
        input_meta = {}
        if "calib" in img_info:
            input_meta["calib"] = img_info["calib"]
        if opt.tracking and ("frame_id" in img_info) and img_info["frame_id"] == 1:
            detector.reset_tracking()
            input_meta["pre_dets"] = load_results[img_id]

        ret = detector.run(img_path, input_meta)
        results[img_id] = ret["results"]

        Bar.suffix = "[{0}/{1}]|Tot: {total:} |ETA: {eta:} ".format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td
        )
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + "|{} {:.3f} ".format(t, avg_time_stats[t].avg)
        bar.next()
    bar.finish()
    if opt.save_results:
        print(
            "saving results to", opt.save_dir + "/save_results_{}{}.json".format(opt.test_dataset, opt.dataset_version)
        )
        json.dump(
            _to_list(copy.deepcopy(results)),
            open(opt.save_dir + "/save_results_{}{}.json".format(opt.test_dataset, opt.dataset_version), "w"),
        )
    dataset.run_eval(results, opt.save_dir)
Example #6
def eval_model(epoch, model, loader):
    global CURRENT_ACC
    model.eval()
    model.to(device)

    positive_distance_meter = AverageMeter()
    negative_distance_meter = AverageMeter()
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()

    print('=' * 20 + "Model Eval" + '=' * 20)
    for i, batch in tqdm(enumerate(loader)):
        start = time.time()
        with torch.no_grad():
            loss, acc, positive_distance, negative_distance = batch_eval(batch, model)
            loss_meter.update(loss.item())
            acc_meter.update(acc.item())
            positive_distance_meter.update(positive_distance.mean().item())
            negative_distance_meter.update(negative_distance.mean().item())

        end = time.time()
        used_time = end - start
        if i % (args.log_every / 2) == 0:
            display = 'epoch=' + str(epoch) + \
                      '\tglobal_step=%d' % (GLOBAL_STEP) + \
                      '\tloss=%.4f' % (loss_meter.val) + \
                      '\tloss_avg=%.4f' % (loss_meter.avg) + \
                      '\tpos_avg=%.4f' % (positive_distance_meter.avg) + \
                      '\tneg_avg=%.4f' % (negative_distance_meter.avg) + \
                      '\tacc=%.4f' % (acc_meter.avg) + \
                      '\ttime=%.2fit/s' % (1. / used_time)
            tqdm.write(display)
    print("Final Acc: %.6f" % (acc_meter.avg))
    print("Final Loss Acc: %.6f" % (loss_meter.avg))
    print("Final Positive Distance: %.6f" % (positive_distance_meter.avg))
    print("Final Negative Distance: %.6f" % (negative_distance_meter.avg))
    CURRENT_ACC = acc_meter.avg
    return
Example #7
def validate(data_loader, model, criterion, device, epoch, args):
    losses = AverageMeter()
    accuracy = AverageMeter()
    model.eval()
    with torch.no_grad():
        for idx, (input_seq, target) in tqdm(enumerate(data_loader),
                                             total=len(data_loader)):
            input_seq = input_seq.to(device)
            target = target.to(device)
            B = input_seq.size(0)
            output, _ = model(input_seq)

            #[_, N, D] = output.size()
            #output = output.view(B*N, D)

            #if len(target.shape) == 1:
            #    target = target.repeat(1, N).view(-1)
            #    metric = "Acc"
            #else:
            #    target = target.repeat(1, N).view(-1, D).float()
            #    metric = "mAP"
            target = target.float()
            metric = "mAP"

            loss = criterion(output, target)
            acc = calc_accuracy(output, target)

            losses.update(loss.item(), B)
            accuracy.update(acc.item(), B)

    print('Loss {loss.avg:.4f}\t'
          '{metric}: {acc.avg:.4f} \t'.format(loss=losses,
                                              acc=accuracy,
                                              metric=metric))
    args.writer_val.add_scalar('global/loss', losses.avg, epoch)
    args.writer_val.add_scalar('global/accuracy', accuracy.avg, epoch)

    return losses.avg, accuracy.avg
Example #8
def validate(val_loader, model):
    """Perform validation on the validation set"""
    batch_time = AverageMeter()
    top1 = AverageMeter()

    model.eval()

    end = time.time()
    val_size = len(val_loader)
    val_loader_iter = iter(val_loader)
    bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
    pbar = tqdm(range(val_size), file=sys.stdout, bar_format=bar_format, ncols=140)
    with torch.no_grad():
        for idx_iter in pbar:
            input, label = next(val_loader_iter)

            input = input.cuda()
            label = label.cuda()

            # compute output
            output = torch.sigmoid(model(input, task='new')[0])
            output = (output + torch.sigmoid(model(torch.flip(input, dims=(3,)), task='new')[0])) / 2

            # accumulate accuracy
            prec1, gt_num = accuracy(output.data, label, args.num_class, topk=(1,))
            top1.update(prec1[0], gt_num[0])

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            description = "[Acc@1-mean: %.2f][Acc@1-cls: %s]"%(top1.vec2sca_avg, str(top1.avg.numpy().round(1)))
            pbar.set_description("[Step %d/%d]"%(idx_iter + 1, val_size) + description)

    logging.info(' * Prec@1 {top1.vec2sca_avg:.3f}'.format(top1=top1))
    logging.info(' * Prec@1 {top1.avg}'.format(top1=top1))

    return top1.vec2sca_avg
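Example #8 above assumes a vector-valued meter: update() takes a per-class accuracy tensor weighted by per-class ground-truth counts, avg stays a tensor, and vec2sca_avg collapses it to one scalar. The sketch below is inferred from those call sites; the original repo's class may differ.

import torch

class VectorAverageMeter:
    """Per-class AverageMeter variant inferred from Example #8 (illustrative)."""

    def __init__(self):
        self.val = None         # last per-class accuracy tensor
        self.sum = 0.0
        self.count = 0.0
        self.avg = None         # per-class running average (tensor)
        self.vec2sca_avg = 0.0  # scalar mean over classes seen so far

    def update(self, val, n):
        # val: per-class accuracies; n: per-class ground-truth counts
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / torch.clamp(self.count, min=1)
        seen = self.count > 0
        self.vec2sca_avg = self.avg[seen].mean().item() if seen.any() else 0.0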
Example #9
    def __init__(self, config, data_loader):
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.in_channels = config.in_channels
        self.out_channels = config.out_channels

        self.nf = config.nf
        self.storing_channels = config.storing_channels

        self.lr = config.lr
        self.b1 = config.b1
        self.b2 = config.b2
        self.weight_decay = config.weight_decay
        self.decay_epoch = config.decay_epoch
        self.content_loss_factor = config.content_loss_factor
        self.perceptual_loss_factor = config.perceptual_loss_factor
        self.generator_loss_factor = config.generator_loss_factor
        self.discriminator_loss_factor = config.discriminator_loss_factor
        self.penalty_loss_factor = config.penalty_loss_factor
        self.eta = config.eta

        self.checkpoint_dir = config.checkpoint_dir
        self.sample_dir = config.sample_dir
        self.epoch = config.epoch
        self.num_epoch = config.num_epoch
        self.image_size = config.image_size

        self.data_loader = data_loader

        self.content_losses = AverageMeter()
        self.generator_losses = AverageMeter()
        self.perceptual_losses = AverageMeter()
        self.discriminator_losses = AverageMeter()
        self.ae_losses = AverageMeter()

        self.visdom = Visdom()

        self.build_model()
Example #10
def evaluate(loader, model):
    print("Evaluate")

    # Set model to eval
    model.eval()

    accuracy = AverageMeter()
    positive_accuracy = AverageMeter()
    negative_accuracy = AverageMeter()
    with torch.no_grad():
        for batch_idx, (x, y) in enumerate(loader):
            x = x.to(device=device)
            y = y.to(device=device).to(torch.float32)
            y = torch.unsqueeze(y, 1)

            scores = model(x)
            loss = criterion(scores, y)

            scores = torch.squeeze(scores, 1)
            y = torch.squeeze(y, 1)

            acc = get_accuracy(y, scores)
            neg_acc, pos_acc = get_accuracy_per_class(y.cpu(), scores.cpu())

            accuracy.update(acc)
            positive_accuracy.update(pos_acc)
            negative_accuracy.update(neg_acc)

    
    wandb.log({
        "valid_acc": accuracy.avg,
        "positive_acc": positive_accuracy.avg,
        "negative_acc": negative_accuracy.avg,
        "valid_loss": loss.item()
    })

    # Set model back to train
    model.train()
Example #11
    def _valid_epoch(self):
        valid_loss = AverageMeter()
        valid_probs = []

        for step, batch in enumerate(self.valid_loader):
            self.model.eval()
            batch = tuple(t.to(self.device) for t in batch)
            batch_size = batch[1].size(0)

            with torch.no_grad():
                op = batch[0]
                inputs = {
                    "input_ids_a": batch[1],
                    "token_type_ids_a": batch[2],
                    "attention_mask_a": batch[3],
                    "input_ids_b": batch[4],
                    "token_type_ids_b": batch[5],
                    "attention_mask_b": batch[6],
                    "input_ids_c": batch[7],
                    "token_type_ids_c": batch[8],
                    "attention_mask_c": batch[9],
                }
                if self.fts_flag:
                    inputs.update({
                        "x_a": batch[10],
                        "x_b": batch[11],
                        "x_c": batch[12]
                    })
                anchor, positive, negative = self.model(**inputs)

                # loss = self.criterion(anchor, positive, negative)
                loss = self.criterion(op.float(), anchor, positive, negative)
                valid_loss.update(loss.item(), batch_size)

            anchor = anchor.to("cpu").numpy()
            positive = positive.to("cpu").numpy()
            negative = negative.to("cpu").numpy()

            pos_dist = np.sqrt(
                np.sum(np.square(anchor - positive), axis=-1, keepdims=True))
            neg_dist = np.sqrt(
                np.sum(np.square(anchor - negative), axis=-1, keepdims=True))
            probs = pos_dist - neg_dist
            # probs = (op.to("cpu").numpy() * (pos_dist - neg_dist)).diagonal()
            valid_probs.append(probs)
        valid_probs = np.concatenate(valid_probs)

        valid_log = {"val_loss": valid_loss.avg, "val_probs": valid_probs}

        return valid_log
Example #12
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if (not opt.trainval) and (not opt.test) else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])

        n_people = len(dataset.coco.getAnnIds(imgIds=[img_id]))
        if n_people not in times_per_people.keys():
            times_per_people[n_people] = {stat: 0 for stat in time_stats}

        if opt.task == 'ddd':
            ret = detector.run(img_path, img_info['calib'])
        else:
            ret = detector.run(img_path)

        results[img_id] = ret['results']

        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(
                t, avg_time_stats[t].avg)
            times_per_people[n_people][t] += avg_time_stats[t].avg
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
    if opt.save_times_per_people:
        out_dir_time = "./time_reports/{}".format(opt.exp_id)
        if not os.path.isdir(out_dir_time):
            os.mkdir(out_dir_time)
        with open("{}/times_per_people.json".format(out_dir_time)) as f:
            json.dump(times_per_people, f)
Example #13
 def __init__(self, sampler, frames_threshold):
     """
     @sampler: will mostly be an instance of DistributedSampler.
     Though it should work with any sampler.
     @frames_threshold: maximum area of the batch
     """
     self.sampler = sampler
     self.frames_threshold = frames_threshold
     # indices = [(idx1, #frames1), (idx2, #frames2), ...]
     # batches = [[idx1, idx2, idx3], [idx4], [idx5, idx6], ...]
     indices, batches = list(), list()
     # the dataset to which these indices are pointing to
     dataset = self.sampler.dataset
     # get all the indices and corresponding durations from
     # the sampler
     for idx in self.sampler:
         indices.append((idx, dataset.utt_list[idx]["mel_frame"]))
     # sort the indices according to duration
     indices.sort(key=lambda elem: int(elem[1]))
     # make sure that you will be able to serve all the utterances
     assert indices[-1][1] < self.frames_threshold, (
         "Won't be able"
         "to serve all sequences. frames_threshold={} while longest"
         " utterance has {} frames").format(self.frames_threshold,
                                            indices[-1][1])
     # start clubbing the utterances together
     batch = list()
     batch_frames, batch_area = 0, 0
     average_meter = AverageMeter('Padding Efficiency')
     for idx, frames in indices:
         # consider adding this utterance to the current batch
         if batch_frames + frames <= self.frames_threshold:
             # can add to the batch
             batch.append(idx)
             batch_frames += frames
             batch_area = frames * len(batch)
             # print('idx=',idx)
             # print('frames=',frames)
             # print('batch_area=',batch_area)
         else:
             # log the stats and add previous batch to batches
             average_meter.add(batch_frames, batch_area)
             batches.append(batch)
             # make a new one
             batch = list([idx])
             batch_frames, batch_area = frames, frames
     # flush the final partial batch so the last utterances aren't dropped
     if batch:
         average_meter.add(batch_frames, batch_area)
         batches.append(batch)
     # don't need the 'indices' any more
     del indices
     self.batches = batches
     average_meter.display_results()
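The __init__ above packs utterances, sorted by mel-frame count, into batches that stay under a fixed frame budget. Its enclosing class is not shown; the wiring below is a hypothetical sketch in which FrameBatchSampler, tts_dataset, and the threshold value are all stand-ins.

from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

# every name except the torch imports is an assumption for illustration
base_sampler = DistributedSampler(tts_dataset)  # tts_dataset.utt_list[i]["mel_frame"] must exist
batch_sampler = FrameBatchSampler(base_sampler, frames_threshold=12000)
# self.batches is a list of index lists, which DataLoader accepts directly
loader = DataLoader(tts_dataset, batch_sampler=batch_sampler.batches)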
Example #14
def valid(model_R, val_loader):
    error_names = ['valid loss']
    errors = AverageMeter(i=len(error_names))
    model_R.eval()

    with torch.no_grad():
        for i, (left_img, right_img) in enumerate(val_loader):
            left_img = left_img.to(device)
            right_img = right_img.to(device)
            disp_pre = model_R(left_img, right_img)
            error = Img_warp_loss(disp_pre, left_img, right_img)
            errors.update([error.item()], args.batch_size)

    return errors.avg, error_names
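Example #14 (and Example #27 further down) constructs the meter as AverageMeter(i=..., precision=...) and passes update() a list, so val and avg are lists with one slot per tracked quantity. A minimal sketch of that multi-value variant, inferred from the call sites rather than copied from those repos:

class AverageMeter:
    """Multi-value variant: tracks i running averages at once."""

    def __init__(self, i=1, precision=3):
        self.meters = i
        self.precision = precision  # repos use this for pretty-printing
        self.reset()

    def reset(self):
        self.val = [0.0] * self.meters
        self.avg = [0.0] * self.meters
        self.sum = [0.0] * self.meters
        self.count = 0

    def update(self, val, n=1):
        assert len(val) == self.meters
        self.count += n
        for k, v in enumerate(val):
            self.val[k] = v
            self.sum[k] += v * n
            self.avg[k] = self.sum[k] / self.count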
Example #15
    def infer(self):

        self.load_model_for_eval()
        avg_test_loss = AverageMeter()
        avg_test_acc = AverageMeter()
        text_avg_test_acc = AverageMeter()
        combined_avg_test_acc = AverageMeter()

        for batch_idx, batch in enumerate(tqdm(self.test_loader)):
            # Get the model output and update the meters
            output = self.compute_loss(batch)
            avg_test_acc.update(output['correct'].cpu().numpy())
            text_avg_test_acc.update(output['text_correct'].cpu().numpy())
            combined_avg_test_acc.update(
                output['combined_correct'].cpu().numpy())

            avg_test_loss.update([output['loss']])

        print(
            'Final test acc (audio) = {:.4f}, final test acc (text) = {:.4f}, final test acc (combined system) = {:.4f}, test loss = {:.4f}'
            .format(avg_test_acc.get(), text_avg_test_acc.get(),
                    combined_avg_test_acc.get(), avg_test_loss.get()))
        return avg_test_loss.get(), avg_test_acc.get(), text_avg_test_acc.get()
Example #16
    def infer(self):
        self.load_model_for_eval()
        avg_test_loss = AverageMeter()
        avg_test_acc = AverageMeter()
        all_true_labels = []
        all_pred_labels = []
        all_audio_embeddings = []
        all_text_embeddings = []

        for batch_idx, batch in enumerate(tqdm(self.test_loader)):
            # Get the model output and update the meters
            output = self.compute_loss(batch)
            avg_test_acc.update(output['correct'].cpu().numpy())
            avg_test_loss.update([output['loss']])

            # Store the Predictions
            all_true_labels.append(batch['label'].cpu())
            all_pred_labels.append(output['predicted'].cpu())
            all_audio_embeddings.append(output['model_output']['audio_embed'].cpu())
            all_text_embeddings.append(output['model_output']['text_embed'].cpu())

        # Collect the predictions and embeddings for the full set
        all_true_labels = torch.cat(all_true_labels).numpy()
        all_pred_labels = torch.cat(all_pred_labels).numpy()
        all_audio_embeddings = torch.cat(all_audio_embeddings).numpy()
        all_text_embeddings = torch.cat(all_text_embeddings).numpy()

        # Save the embeddings and plot the confusion matrix
        np.savez_compressed('embeddings.npz',
                            audio=all_audio_embeddings,
                            text=all_text_embeddings,
                            labels=all_true_labels)
        cm = confusion_matrix(all_true_labels, all_pred_labels)
        plot_confusion_matrix(cm, self.test_loader.dataset.labels_list(), normalize=True)

        print('Final test acc = {:.4f}, test loss = {:.4f}'.format(avg_test_acc.get(), avg_test_loss.get()))
        return avg_test_loss.get(), avg_test_acc.get()
Example #17
def validate(data_loader, model, criterion, device, epoch, args):
    losses = AverageMeter()
    accuracy = [
        [AverageMeter(), AverageMeter()],  # forward top1, top5
        [AverageMeter(), AverageMeter()]
    ]  # backward top1, top5

    model.eval()

    with torch.no_grad():
        for idx, input_seq in enumerate(data_loader):
            input_seq = input_seq.to(device)
            B = input_seq.size(0)
            loss, loss_step, acc, extra = model(input_seq)

            for i in range(2):
                top1, top5 = acc[i].mean(0)  # average acc across multi-gpus
                accuracy[i][0].update(top1.item(), B)
                accuracy[i][1].update(top5.item(), B)

            loss = loss.mean()  # average loss across multi-gpus
            losses.update(loss.item(), B)

    print('Epoch: [{0}/{1}]\t'
          'Loss {loss.val:.6f}\t'
          'Acc: {acc[0][0].val:.4f}\t'.format(epoch,
                                              args.epochs,
                                              loss=losses,
                                              acc=accuracy))

    args.writer_val.add_scalar('global/loss', losses.avg, epoch)
    args.writer_val.add_scalar('global/F-top1', accuracy[0][0].avg, epoch)
    args.writer_val.add_scalar('global/F-top5', accuracy[0][1].avg, epoch)
    args.writer_val.add_scalar('global/B-top1', accuracy[1][0].avg, epoch)
    args.writer_val.add_scalar('global/B-top5', accuracy[1][1].avg, epoch)

    return losses.avg, np.mean([accuracy[0][0].avg, accuracy[1][0].avg])
Example #18
    def train_epoch(self, data):
        tr_loss = AverageMeter()
        self.epoch_reset()
        update = 0
        for step, batch in enumerate(data):
            self.model.train()

            input_ids, src_len, input_mask, segment_ids, src_token, label_ids, tgt_len, tgt_token = batch
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            label_ids = label_ids.to(self.device)

            logits = self.model(input_ids, input_mask, segment_ids)
            loss = self.criterion(output=logits, target=label_ids)
            if len(self.n_gpu) >= 2:
                loss = loss.mean()
            if self.gradient_accumulation_steps > 1:
                loss = loss / self.gradient_accumulation_steps
            loss.backward()
            clip_grad_norm_(self.model.parameters(), self.grad_clip)

            if (step + 1) % self.gradient_accumulation_steps == 0:
                self.optimizer.step()
                self.optimizer.zero_grad()
                self.global_step += 1
            tr_loss.update(loss.item(), n=1)

            # store the outputs and targets produced during training
            self.outputs.append(logits.cpu().detach())
            self.targets.append(label_ids.cpu().detach())
            update += 1
            progress_bar(update, self.config.train_nbatchs)

        print("\n------------- train result --------------")
        # epoch metric
        self.outputs = torch.cat(self.outputs, dim=0).cpu().detach()
        self.targets = torch.cat(self.targets, dim=0).cpu().detach()
        self.result['loss'] = tr_loss.avg
        if self.epoch_metrics:
            for metric in self.epoch_metrics:
                metric(logits=self.outputs, target=self.targets)
                value = metric.value()
                if value:
                    self.result[f'{metric.name()}'] = value
        if "cuda" in str(self.device):
            torch.cuda.empty_cache()
        return self.result
Example #19
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    Dataset = dataset_factory[opt.dataset[0]]
    Dataset2 = dataset_factory[opt.dataset[1]]

    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = opt.test_dataset
    dataset = Dataset(opt, split)

    dataset2 = Dataset2(opt, split)

    detector = Detector(opt)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])
        if detector.model._name == 'fusion':
            img_path2 = os.path.join(dataset2.img_dir, img_info['file_name'])

        if opt.task == 'ddd':
            ret = detector.run(img_path, img_info['calib'])
        else:
            if detector.model._name == 'fusion':
                ret = detector.run(img_path, img_path2)
            else:
                ret = detector.run(img_path)

        results[img_id] = ret['results']

        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(
                t, avg_time_stats[t].avg)
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
Example #20
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    if opt.task == 'car_pose_6dof':
        # pass loaded 3D models for debug visualisations
        detector.set_models(dataset.models)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind in range(num_iters):
        img_id = dataset.images[ind]

        if opt.task == 'car_pose_6dof':
            img_path = os.path.join(dataset.img_dir, img_id + '.jpg')
            ret = detector.run(img_path, img_id, dataset.calib)
        else:
            img_info = dataset.coco.loadImgs(ids=[img_id])[0]
            img_path = os.path.join(dataset.img_dir, img_info['file_name'])
            img_name = img_info['file_name'][:-4]
            if opt.task == 'ddd':
                ret = detector.run(img_path, img_name, img_info['calib'])
            else:
                ret = detector.run(img_path, img_name)

        results[img_id] = ret['results']

        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(
                t, avg_time_stats[t].avg)
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
Example #21
def validate(config, testloader, model, writer_dict, device):
    rank = get_rank()
    world_size = get_world_size()
    model.eval()
    ave_loss = AverageMeter()
    tot_inter = np.zeros(config.DATASET.NUM_CLASSES)
    tot_union = np.zeros(config.DATASET.NUM_CLASSES)
    with torch.no_grad():
        for i_iter, batch in enumerate(testloader):
            image, label, _, _ = batch
            size = label.size()
            label = label.long().to(device)
            image = image.to(device)

            loss, pred = model(image, label)
            if pred.size()[-2] != size[-2] or pred.size()[-1] != size[-1]:
                pred = F.interpolate(pred,
                                     size=(size[-2], size[-1]),
                                     mode='bilinear',
                                     align_corners=False)
            reduced_loss = reduce_tensor(loss)
            ave_loss.update(reduced_loss.item())

            batch_inter, batch_union = batch_intersection_union(
                pred, label, config.DATASET.NUM_CLASSES)
            tot_inter += batch_inter
            tot_union += batch_union

            if i_iter % config.PRINT_FREQ == 0 and rank == 0:
                msg = f'Iter: {i_iter}, Loss: {ave_loss.average() / world_size:.6f}'
                logging.info(msg)

    tot_inter = torch.from_numpy(tot_inter).to(device)
    tot_union = torch.from_numpy(tot_union).to(device)
    tot_inter = reduce_tensor(tot_inter).cpu().numpy()
    tot_union = reduce_tensor(tot_union).cpu().numpy()
    IoU = np.float64(1.0) * tot_inter / (np.spacing(1, dtype=np.float64) +
                                         tot_union)
    mean_IoU = IoU.mean()
    print_loss = ave_loss.average() / world_size

    if rank == 0:
        writer = writer_dict['writer']
        global_steps = writer_dict['valid_global_steps']
        writer.add_scalar('valid_loss', print_loss, global_steps)
        writer.add_scalar('valid_mIoU', mean_IoU, global_steps)
        writer_dict['valid_global_steps'] = global_steps + 1

    return print_loss, mean_IoU
Example #22
def test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}

    f = open("fps_test.txt", 'w')  #fps_write

    for ind in range(num_iters):
        img_id = dataset.images[ind]
        img_info = dataset.coco.loadImgs(ids=[img_id])[0]
        img_path = os.path.join(dataset.img_dir, img_info['file_name'])
        print(img_path)

        #print fps
        start_time = time.time()

        if opt.task == 'ddd':
            ret = detector.run(img_path, img_info['calib'])
        else:
            ret = detector.run(img_path)

        results[img_id] = ret['results']

        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(
                t, avg_time_stats[t].avg)
        bar.next()

        f.write(str(1.0 / (time.time() - start_time)) + '\n')  # fps_write

    bar.finish()
    f.close()  # fps_write
    dataset.run_eval(results, opt.save_dir)
Example #23
@torch.no_grad()
def contrastive_evaluate(val_loader, model, memory_bank):
    top1 = AverageMeter('Acc@1', ':6.2f')
    model.eval()

    for batch in val_loader:
        images = batch['image'].cuda(non_blocking=True)
        target = batch['target'].cuda(non_blocking=True)

        output = model(images)
        output = memory_bank.weighted_knn(output)

        acc1 = 100 * torch.mean(torch.eq(output, target).float())
        top1.update(acc1.item(), images.size(0))

    return top1.avg
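Example #23 constructs the meter with a name and a format string, the signature of the AverageMeter in the official PyTorch ImageNet example, which also defines __str__ for progress lines. A sketch of that variant:

class AverageMeter:
    """Named meter with a display format, as in the PyTorch ImageNet example."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)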
Example #24
def train(model, data_loader, optimizer, lr_scheduler, epoch, LOG):

    stages = 4
    losses = [AverageMeter() for _ in range(stages)]
    length_loader = len(data_loader)

    model.train()

    for batch_id, data in enumerate(data_loader()):
        left_img, right_img, gt = data

        mask = paddle.to_tensor(gt.numpy() > 0)
        gt_mask = paddle.masked_select(gt, mask)

        outputs = model(left_img, right_img)
        outputs = [paddle.squeeze(output) for output in outputs]

        tem_stage_loss = []
        for index in range(stages):
            temp_loss = args.loss_weights[index] * F.smooth_l1_loss(
                paddle.masked_select(outputs[index], mask),
                gt_mask,
                reduction='mean')
            tem_stage_loss.append(temp_loss)
            losses[index].update(
                float(temp_loss.numpy() / args.loss_weights[index]))

        sum_loss = paddle.add_n(tem_stage_loss)
        sum_loss.backward()
        optimizer.step()
        optimizer.clear_grad()

        if batch_id % 5 == 0:
            info_str = [
                'Stage {} = {:.2f}({:.2f})'.format(x, losses[x].val,
                                                   losses[x].avg)
                for x in range(stages)
            ]
            info_str = '\t'.join(info_str)
            info_str = 'Train Epoch{} [{}/{}]  lr:{:.5f}\t{}'.format(
                epoch, batch_id, length_loader, optimizer.get_lr(), info_str)
            LOG.info(info_str)

    lr_scheduler.step()

    info_str = '\t'.join(
        ['Stage {} = {:.2f}'.format(x, losses[x].avg) for x in range(stages)])
    LOG.info('Average train loss: ' + info_str)
Example #25
def test(opt):
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

  Dataset = dataset_factory[opt.test_dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  #Logger(opt)
  
  split = 'val' if not opt.trainval else 'test'
  dataset = Dataset(opt, split)
  detector = Detector(opt)

  if opt.load_results != '': # load results in json
    load_results = json.load(open(opt.load_results, 'r'))

  results = {}
  num_iters = len(dataset) if opt.num_iters < 0 else opt.num_iters
  bar = Bar('{}'.format(opt.exp_id), max=num_iters)
  time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
  avg_time_stats = {t: AverageMeter() for t in time_stats}
  for ind in range(num_iters):
    img_id = dataset.images[ind]
    img_info = dataset.coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(dataset.img_dir, img_info['file_name'])
    input_meta = {}
    if 'calib' in img_info:
      input_meta['calib'] = img_info['calib']
    if (opt.tracking and ('frame_id' in img_info) and img_info['frame_id'] == 1):
      detector.reset_tracking()
      input_meta['pre_dets'] = load_results[img_id]

    ret = detector.run(img_path, input_meta)    
    results[img_id] = ret['results']

    Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
                   ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
    for t in avg_time_stats:
      avg_time_stats[t].update(ret[t])
      Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
    bar.next()
  bar.finish()
  if opt.save_results:
    print('saving results to', opt.save_dir + '/save_results_{}{}.json'.format(
      opt.test_dataset, opt.dataset_version))
    json.dump(_to_list(copy.deepcopy(results)), 
              open(opt.save_dir + '/save_results_{}{}.json'.format(
                opt.test_dataset, opt.dataset_version), 'w'))
  dataset.run_eval(results, opt.save_dir)
Example #26
def validate(config, testloader, model, writer_dict, device):
    
    rank = get_rank()
    world_size = get_world_size()
    model.eval()
    ave_loss = AverageMeter()
    confusion_matrix = np.zeros(
        (config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES))

    with torch.no_grad():
        for _, batch in enumerate(testloader):
            image, label, _, _ = batch
            size = label.size()
            image = image.to(device)
            label = label.long().to(device)

            losses, pred = model(image, label)
            pred = F.interpolate(input=pred, size=(size[-2], size[-1]),
                                 mode='bilinear')
            loss = losses.mean()
            reduced_loss = reduce_tensor(loss)
            ave_loss.update(reduced_loss.item())

            confusion_matrix += get_confusion_matrix(
                label,
                pred,
                size,
                config.DATASET.NUM_CLASSES,
                config.TRAIN.IGNORE_LABEL)

    confusion_matrix = torch.from_numpy(confusion_matrix).to(device)
    reduced_confusion_matrix = reduce_tensor(confusion_matrix)

    confusion_matrix = reduced_confusion_matrix.cpu().numpy()
    pos = confusion_matrix.sum(1)
    res = confusion_matrix.sum(0)
    tp = np.diag(confusion_matrix)
    IoU_array = (tp / np.maximum(1.0, pos + res - tp))
    mean_IoU = IoU_array.mean()
    print_loss = ave_loss.average()/world_size

    if rank == 0:
        writer = writer_dict['writer']
        global_steps = writer_dict['valid_global_steps']
        writer.add_scalar('valid_loss', print_loss, global_steps)
        writer.add_scalar('valid_mIoU', mean_IoU, global_steps)
        writer_dict['valid_global_steps'] = global_steps + 1
    return print_loss, mean_IoU, IoU_array
Example #27
def train(gc_net, optimizer):
    loss_names = ['loss train', 'acc train']
    losses = AverageMeter(i=len(loss_names), precision=4)

    gc_net.train()

    optimizer.zero_grad()
    output = gc_net(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    losses.update([loss_train.item(), acc_train.item()])

    return losses.avg, loss_names
Example #28
    def update_states(self, states, batch_size=1):

        if len(self.states) == 0:
            state_names = states.keys()
            self.states = OrderedDict(
                [(key, 0) for key in state_names]
            )

            self.average_meters = OrderedDict(
                [(key, AverageMeter())
                 for key in state_names]
            )

        self.states.update(states)
        for key, meter in self.average_meters.items():
            meter.update(self.states[key], batch_size)
Example #29
def train(epoch):

    # define meters
    loss_meter = AverageMeter()

    # put model into training mode
    model.train()

    for param_group in optimizer.param_groups:
        print('learning rate: {}'.format(param_group['lr']))

    for i, sample in enumerate(tqdm(train_dataset_it)):

        im = sample['image']
        instances = sample['instance'].squeeze()
        class_labels = sample['label'].squeeze()

        output = model(im)
        loss = criterion(output, instances, class_labels, **args['loss_w'])
        loss = loss.mean()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if args['display'] and i % args['display_it'] == 0:
            with torch.no_grad():
                visualizer.display(im[0], 'image')

                predictions = cluster.cluster_with_gt(
                    output[0],
                    instances[0],
                    n_sigma=args['loss_opts']['n_sigma'])
                visualizer.display([predictions.cpu(), instances[0].cpu()],
                                   'pred')

                sigma = output[0][2].cpu()
                sigma = (sigma - sigma.min()) / (sigma.max() - sigma.min())
                sigma[instances[0] == 0] = 0
                visualizer.display(sigma, 'sigma')

                seed = torch.sigmoid(output[0][3]).cpu()
                visualizer.display(seed, 'seed')

        loss_meter.update(loss.item())

    return loss_meter.avg
Example #30
def prefetch_test(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    logger = Logger(opt)
    Detector = detector_factory[opt.task]

    split = 'val' if not opt.test else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, detector.pre_process),
        batch_size=1, shuffle=False, num_workers=0, pin_memory=True,
        collate_fn=test_collate_fn
    )

    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        # pre_processed_images['img_info'] = dataset.coco.loadImgs([img_id])[0]
        ret = detector.run(pre_processed_images)
        results[img_id] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        # if opt.debug:
        #     # img = pre_processed_images['image'].detach().cpu().numpy().squeeze()
        #     print(type(ret['results']))
        #     vis_dets(ret, img_id, dataset, split=split)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix += '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
                t, tm=avg_time_stats[t])
        bar.next()
    bar.finish()
    save_path = opt.save_dir + '/{}_results'.format(split)
    dataset.save_pkl(results, save_path)
    dataset.save_dota_format(results, save_path)
    if split != 'test':
        dataset.dota_eval(save_path + '/Task1_{:s}.txt')
        mean_ap, eval_results = dataset.coco_eval(results)
        print(mean_ap, eval_results)