Example #1
    optimizer_name = config["optimizer"].lower()
    if optimizer_name == "adam":
        optimizer = torch.optim.Adam(net.parameters(), lr=config["lr_rate"])
    elif optimizer_name == "rmsprop":
        optimizer = torch.optim.RMSprop(net.parameters(), lr=config["lr_rate"])
    elif optimizer_name == "sgd":
        optimizer = torch.optim.SGD(
            net.parameters(), lr=config["lr_rate"], momentum=0.9
        )
    else:
        raise ValueError(
            "unknown optimizer {}, expect one of (Adam, RMSprop, SGD)".format(
                config["optimizer"]
            )
        )

    # If a model checkpoint has been specified, try to load its weights
    start_epoch = 1
    metrics = metric.MetricList(
        [metric.AirbusFScoreApprox(), metric.BinaryIoU(), metric.BinaryDice()]
    )
    if args.model_checkpoint:
        print("Loading weights from {}...".format(args.model_checkpoint))
        checkpoint = torch.load(args.model_checkpoint, map_location=torch.device("cpu"))
        net.load_state_dict(checkpoint["model"])

        # If the --resume flag is specified, training will continue from the checkpoint
        # as if it had never been aborted. Otherwise, only the loaded weights are kept
        # and training starts from scratch
        if args.resume:
            start_epoch = checkpoint["epoch"] + 1
            optimizer.load_state_dict(checkpoint["optimizer"])
            metrics = checkpoint["metrics"]
            print(
                "Resuming training from epoch {}: Metrics - {}".format(
                    start_epoch, metrics
                )
            )
    if args.resume_epoch < 0:
        epoch_start = 0
        step_counter = 0
    else:
        # net.load_checkpoint(epoch=args.resume_epoch, optimizer=optimizer)
        epoch_start = args.resume_epoch
        step_counter = epoch_start * len(train_iter)

    # set learning rate scheduler
    num_worker = 4
    lr_scheduler = MultiFactorScheduler(
        base_lr=args.lr_base,
        steps=[int(x / (args.batch_size * num_worker)) for x in args.lr_steps],
        factor=args.lr_factor,
        step_counter=step_counter)
    # define evaluation metric
    metrics = metric.MetricList(metric.Loss(name="loss-ce"), )
    # enable cudnn tune
    cudnn.benchmark = True
    start_time = time.time()
    net.fit(train_iter=train_iter,
            eval_iter=None,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            metrics=metrics,
            epoch_start=epoch_start,
            epoch_end=args.end_epoch)
    real_time = time.time() - start_time
    print("Finished training!")
    print("Time cost:" + real_time)
Example #3
File: model.py Project: zp672087110/GloRe
    # initialize the dynamic model
    net = model(net=sym_c3d,
                optimizer=optimizer,
                criterion=torch.nn.CrossEntropyLoss().cuda())

    # load the pretrained model
    if resume:
        net.load_checkpoint(epoch=load_epoch)
    elif pretained:
        pretrained_model_state_dic = GetPretrainedModel(name='resnet')
        net.load_state(state_dic=pretrained_model_state_dic, strict=False)
    else:
        logging.info("Train from scratch using random initialization")

    # prepare optimization
    metrics = metric.MetricList(metric.Accuracy(topk=1, name="acc-top1"),
                                metric.Accuracy(topk=5, name="acc-top5"))
    lr_scheduler = MultiFactorScheduler(steps=[300, 1000],
                                        base_lr=0.1,
                                        factor=0.1)

    tr_iter, ts_iter = dataiter_factory.creat(
        name='ucf101',
        data_root='../../dataset/UCF101',
        batch_size=1,
    )

    net.fit(
        iter_train=tr_iter,
        metrics_train=metrics,
        epoch_start=0,
        epoch_end=100,
    )
Example #4
    # Initialize ship or no-ship detection network
    num_classes = 1
    print("Loading ship detection model ({})...".format(config["resnet_size"]))
    net = models.resnet(config["resnet_size"], num_classes)
    print(net)

    # Loss function: binary cross entropy with logits. It expects logits, so the
    # output layer must return logits instead of probabilities
    criterion = torch.nn.BCEWithLogitsLoss()

    # Optimizer: adam
    optimizer = torch.optim.Adam(net.parameters(), lr=config["lr_rate"])

    # If a model checkpoint has been specified, try to load its weights
    start_epoch = 1
    metrics = metric.MetricList([metric.Accuracy()])
    if args.model_checkpoint:
        print("Loading weights from {}...".format(args.model_checkpoint))
        checkpoint = torch.load(args.model_checkpoint,
                                map_location=torch.device("cpu"))
        net.load_state_dict(checkpoint["model"])

        # If the --resume flag is specified, training will continue from the checkpoint
        # as if it had never been aborted. Otherwise, only the loaded weights are kept
        # and training starts from scratch
        if args.resume:
            start_epoch = checkpoint["epoch"] + 1
            optimizer.load_state_dict(checkpoint["optimizer"])
            metrics = checkpoint["metrics"]
            print("Resuming training from epoch {}: Metrics - {}".format(
                start_epoch, metrics))
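Both Example #1 and Example #4 read a checkpoint dictionary with the keys "model", "optimizer", "epoch" and "metrics". Below is a minimal sketch of how a compatible checkpoint could be written at the end of each epoch; the helper name and file path are illustrative assumptions, and only the dictionary keys come from the loading code above.

import torch

# Sketch only: save_checkpoint and the default path are assumptions; the keys
# match what the resume logic above expects to find in the checkpoint.
def save_checkpoint(net, optimizer, epoch, metrics, path="checkpoint.pth"):
    torch.save(
        {
            "model": net.state_dict(),
            "optimizer": optimizer.state_dict(),
            "epoch": epoch,
            "metrics": metrics,
        },
        path,
    )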
Example #5
def test(data_loader, model, opt, class_names):
    print('test')

    model.eval()

    # eval metrics
    metrics = metric.MetricList(
        metric.Accuracy(topk=1, name="top1"),
        metric.Accuracy(topk=2, name="top2"),
        metric.Accuracy(topk=3, name="top3"),
        metric.Accuracy(topk=4, name="top4"),
        metric.Accuracy(topk=5, name="top5"),
    )
    metrics.reset()

    avg_score = {}
    sum_batch_elapse = 0.
    sum_batch_inst = 0
    duplication = 1
    total_round = 1

    out_target = []
    out_output = []

    with open('datasets/template.csv', 'r') as f:
        template_sample = {}
        for line in f.readlines():
            name = line.split(',')[0]
            template_sample[name] = -1

    interval = len(data_loader) // 10
    for i_round in range(total_round):

        i_batch = 0
        print("round #{}/{}".format(i_round, total_round))

        with torch.no_grad():
            for i, (inputs, targets, bbox) in enumerate(data_loader):
                # data_time.update(time.time() - end_time)
                batch_start_time = time.time()
                targets_ori = targets[0].cuda()

                if opt.model == 'slowfast':
                    slow = inputs[:, :, ::8, :, :]
                    fast = inputs[:, :, ::2, :, :]
                    outputs = model([slow, fast])
                else:
                    outputs = model(inputs)

                output_np = outputs.data.cpu().numpy()
                target_np = targets_ori.data.cpu().numpy()
                out_output.append(output_np)
                out_target.append(target_np[:, np.newaxis])

                sum_batch_elapse += time.time() - batch_start_time
                sum_batch_inst += 1
                if not opt.no_softmax_in_test:
                    outputs = F.softmax(outputs, dim=1)

                outputs = outputs.data.cpu()
                # targets = targets.cpu()

                for i_item in range(0, outputs.shape[0]):
                    output_i = outputs[i_item, :].view(1, -1)
                    target_i = torch.LongTensor([targets[0][i_item]])
                    video_subpath_i = targets[1][i_item]
                    if video_subpath_i in avg_score:
                        avg_score[video_subpath_i][1] += output_i
                        avg_score[video_subpath_i][2] += 1
                        duplication = 0.92 * duplication + 0.08 * avg_score[
                            video_subpath_i][2]
                    else:
                        avg_score[video_subpath_i] = [
                            torch.LongTensor(target_i.numpy().copy()),
                            torch.FloatTensor(output_i.numpy().copy()), 1
                        ]  # the last one is counter

                # show progress
                if (i_batch % interval) == 0:
                    metrics.reset()
                    for _, video_info in avg_score.items():
                        target, pred, _ = video_info
                        metrics.update([pred], target)
                    name_value = metrics.get_name_value()
                    print(
                        "{:.1f}%, {:.1f} \t| Batch [0,{}]    \tAvg: {} = {:.5f}, {} = {:.5f}".format(
                            float(100 * i_batch) / len(data_loader), \
                            duplication, \
                            i_batch, \
                            name_value[0][0][0], name_value[0][0][1], \
                            name_value[1][0][0], name_value[1][0][1]))
                i_batch += 1

        # finished
        print("Evaluation one epoch Finished!")

        # collect raw outputs and save the per-video averaged scores
        output_array = np.concatenate(out_output, axis=0)
        target_array = np.concatenate(out_target, axis=0)
        if opt.annotation_path.endswith('split.json'):
            name = 'AUTSL_' + opt.model + '.npy'
            pkl_name = 'AUTSL_' + opt.model + '2_all.pkl'
        else:
            name = 'AUTSL_' + opt.model + '_all.npy'
            pkl_name = 'AUTSL_' + opt.model + '_all.pkl'
        # np.save(os.path.join(name), output_array, allow_pickle=False)

        import pickle
        with open(pkl_name, 'wb') as f:
            pickle.dump(avg_score, f)

        metrics.reset()
        class_num = {}
        class_acc = {}
        for _, video_info in avg_score.items():
            # total video
            target, pred, _ = video_info
            metrics.update([pred], target)

            # class acc
            if target.item() not in class_num:
                class_num[target.item()] = 1
            else:
                class_num[target.item()] += 1

            _, pred_topk = pred.topk(1, 1, True, True)

            pred_topk = pred_topk.t()
            correct = pred_topk.eq(target.view(1, -1).expand_as(pred_topk))
            if target.item() not in class_acc:
                # class_acc[target.item()] = correct.item()
                class_acc[target.item()] = float(
                    correct.view(-1).float().sum(0, keepdim=True).numpy())
            else:
                # class_acc[target.item()] += correct.item()
                class_acc[target.item()] += float(
                    correct.view(-1).float().sum(0, keepdim=True).numpy())

        for video_name, video_info in avg_score.items():
            target, pred, _ = video_info
            template_sample[video_name] = torch.argmax(pred).item()
        # with open('predictions.csv', 'w') as f2:
        #     for k, v in template_sample.items():
        #         line = k + ',' + str(v) + '\n'
        #         f2.writelines(line)

        print("Total time cost: {:.1f} sec".format(sum_batch_elapse))
        print("Speed: {:.4f} samples/sec".format(
            opt.batch_size * sum_batch_inst / sum_batch_elapse))
        print("Accuracy:")
        print(json.dumps(metrics.get_name_value(), indent=4, sort_keys=True))
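Example #5 relies on metric.Accuracy(topk=k) together with MetricList's reset/update/get_name_value to aggregate per-video scores. The function below is a standalone sketch of the top-k accuracy computation such a metric is assumed to perform; it is illustrative only and is not the project's metric module.

import torch

def topk_accuracy(output, target, k=1):
    """Fraction of samples whose true label is among the k highest scores.

    Sketch only: output is (N, num_classes) scores, target is (N,) labels.
    """
    _, pred = output.topk(k, dim=1, largest=True, sorted=True)  # (N, k) class indices
    correct = pred.eq(target.view(-1, 1).expand_as(pred))       # (N, k) matches
    return correct.any(dim=1).float().mean().item()

# Usage sketch on random scores: 8 samples, 10 classes.
scores = torch.randn(8, 10)
labels = torch.randint(0, 10, (8,))
print("top-5 accuracy:", topk_accuracy(scores, labels, k=5))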