Пример #1
0
def evaluate(args):
    """Run facial-landmark inference on one image and show/save the result.

    Args:
        args: namespace with
            .image         — path to the input image,
            .model         — path to a saved training snapshot (.pth),
            .face_detector — detector spec forwarded to face_detect.
    """
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

    print('The image is {:}'.format(args.image))
    print('The model is {:}'.format(args.model))
    snapshot = Path(args.model)
    # BUG FIX: the original message never interpolated the missing path.
    assert snapshot.exists(), 'The model path {:} does not exist'.format(snapshot)
    facebox = face_detect(args.image, args.face_detector)

    print('The face bounding box is {:}'.format(facebox))
    assert len(facebox) == 4, 'Invalid face input : {:}'.format(facebox)
    snapshot = torch.load(str(snapshot))

    # General data augmentation: ImageNet mean/std normalization.
    mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Rebuild the evaluation pipeline from the args stored in the snapshot.
    param = snapshot['args']
    eval_transform = transforms.Compose(
        [transforms.PreCrop(param.pre_crop_expand),
         transforms.TrainScale2WH((param.crop_width, param.crop_height)),
         transforms.ToTensor(), normalize])
    model_config = load_configure(param.model_config, None)
    dataset = GeneralDataset(eval_transform, param.sigma, model_config.downsample,
                             param.heatmap_type, param.data_indicator)
    dataset.reset(param.num_pts)

    net = obtain_model(model_config, param.num_pts + 1)
    net = net.cuda()
    weights = remove_module_dict(snapshot['state_dict'])
    net.load_state_dict(weights)
    print('Prepare input data')
    [image, _, _, _, _, _, cropped_size], meta = dataset.prepare_input(args.image, facebox)
    inputs = image.unsqueeze(0).cuda()
    # Network forward pass (inference only, no gradients).
    with torch.no_grad():
        batch_heatmaps, batch_locs, batch_scos = net(inputs)
    # Map the predicted locations back onto the original image size.
    cpu = torch.device('cpu')
    np_batch_locs = batch_locs.to(cpu).numpy()
    np_batch_scos = batch_scos.to(cpu).numpy()
    cropped_size = cropped_size.numpy()
    # Drop the last (background) point; keep scores as a column vector.
    locations = np_batch_locs[0, :-1, :]
    scores = np.expand_dims(np_batch_scos[0, :-1], -1)

    scale_h = cropped_size[0] * 1. / inputs.size(-2)
    scale_w = cropped_size[1] * 1. / inputs.size(-1)

    # cropped_size[2:4] holds the crop offset within the original image —
    # assumed from the indexing here; TODO confirm against prepare_input.
    locations[:, 0] = locations[:, 0] * scale_w + cropped_size[2]
    locations[:, 1] = locations[:, 1] * scale_h + cropped_size[3]
    prediction = np.concatenate((locations, scores), axis=1).transpose(1, 0)

    print('the coordinates for {:} facial landmarks:'.format(param.num_pts))
    for i in range(param.num_pts):
        point = prediction[:, i]
        print('the {:02d}/{:02d}-th point : ({:.1f}, {:.1f}), score = {:.2f}'.format(
            i + 1, param.num_pts, float(point[0]), float(point[1]), float(point[2])))
    image = draw_image_by_points(args.image, prediction, 2, (255, 0, 0), facebox, None, None)
    image.show()
    image.save(args.image.split('.')[0] + '_result.jpg')
Пример #2
0
def build_transforms(config):
    """Build the training and test-time augmentation pipelines.

    Args:
        config: config object exposing DATA.HEIGHT/WIDTH and
            AUG.RC_PROB / AUG.RE_PROB.

    Returns:
        (transform_train, transform_test) tuple of composed transforms.
    """
    # Both pipelines normalize with the ImageNet statistics.
    imagenet_norm = T.Normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])

    transform_train = T.Compose([
        T.RandomCroping(config.DATA.HEIGHT,
                        config.DATA.WIDTH,
                        p=config.AUG.RC_PROB),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        imagenet_norm,
        T.RandomErasing(probability=config.AUG.RE_PROB),
    ])

    transform_test = T.Compose([
        T.Resize((config.DATA.HEIGHT, config.DATA.WIDTH)),
        T.ToTensor(),
        imagenet_norm,
    ])

    return transform_train, transform_test
    def initialize(self, dataset_opt):
        """Collect the image paths for this dataset and set up transforms.

        Args:
            dataset_opt: options with .phase, .dataroot, .use_subset,
                .transform_H and .transform_L.
        """
        self.phase = dataset_opt.phase
        self.dir = dataset_opt.dataroot

        if dataset_opt.use_subset:
            # Restrict to the files listed (one per line) in subset.txt.
            subset_file = os.path.join(self.dir, 'subset.txt')
            with open(subset_file) as listing:
                self.paths = [os.path.join(self.dir, name.rstrip('\n'))
                              for name in listing]
        else:
            # Otherwise walk the whole data root recursively.
            self.paths = get_image_paths_recursive(self.dir, [])
        self.size = len(self.paths)
        self.transform_H = transforms.get_transform_H(dataset_opt.transform_H)
        self.transform_L = transforms.get_transform_L(dataset_opt.transform_L)
        self.to_tensor = transforms.ToTensor()
Пример #4
0
 def train(self, signals, labels, sequences):
     """Run one training epoch over the batches queued by epoch_forward_init.

     Each iteration pops a (signal, label, _) batch from self.queue, moves
     it to the configured device, and performs one forward/backward/step.
     The running loss is logged to TensorBoard at the end of the epoch.
     """
     self.epoch_forward_init(signals, labels, sequences, True)
     for i in range(self.epoch_iter_length):
         signal, label, _ = self.queue.get()
         # Fractional epoch used as the TensorBoard global step.
         self.step = float(i / self.epoch_iter_length + self.epoch)
         self.optimizer.zero_grad()
         signal, label = transforms.ToTensor(signal,
                                             label,
                                             gpu_id=self.opt.gpu_id)
         output, loss = self.forward(signal, label)
         self.epoch_loss += loss.item()
         loss.backward()
         self.optimizer.step()
     self.load_poll_terminate()
     # NOTE(review): reuses `i` from the loop above — assumes
     # epoch_iter_length >= 1; would raise UnboundLocalError otherwise.
     self.opt.TBGlobalWriter.add_scalars(
         'fold' + str(self.fold + 1) + '/loss',
         {'train_loss': self.epoch_loss / (i + 1)}, self.step)
     self.add_class_acc_to_tensorboard('train')
Пример #5
0
    def eval(self, signals, labels, sequences):
        """Run one evaluation epoch and record losses and metrics.

        Pops queued (signal, label, sequence) batches, runs forward passes
        without gradients, then — unless in autoencoder mode — appends
        F1/err/confusion-matrix results and tracks the best epoch according
        to self.opt.best_index ('f1' or 'err').
        """
        self.epoch_forward_init(signals, labels, sequences, False)
        for i in range(self.epoch_iter_length):
            signal, label, sequence = self.queue.get()
            self.eval_detail['sequences'].append(list(sequence))
            # NOTE(review): 'ture_labels' looks like a typo for
            # 'true_labels', but it is a runtime dict key — other code may
            # read this exact spelling, so it is kept as-is.
            self.eval_detail['ture_labels'].append(list(label))
            signal, label = transforms.ToTensor(signal,
                                                label,
                                                gpu_id=self.opt.gpu_id)
            with torch.no_grad():
                output, loss = self.forward(signal, label)
                self.epoch_loss += loss.item()

        if self.opt.mode == 'autoencoder':
            # Every 10th epoch, visualize reconstructions of the last batch.
            if (self.epoch + 1) % 10 == 0:
                plot.draw_autoencoder_result(signal.data.cpu().numpy(),
                                             output.data.cpu().numpy(),
                                             self.opt)
                plot.draw_scatter(self.features, self.opt)
        else:
            prec, reca, f1, err, kappa = statistics.report(self.confusion_mat)
            print(
                'epoch:' + str(self.epoch + 1),
                ' macro-prec,reca,F1,err,kappa: ' +
                str(statistics.report(self.confusion_mat)))
            self.add_class_acc_to_tensorboard('eval')
            self.results['F1'].append(f1)
            self.results['err'].append(err)
            self.results['confusion_mat'].append(self.confusion_mat)
            self.results['loss'].append(self.epoch_loss / (i + 1))
            self.results['eval_detail'].append(self.eval_detail)
            # Track the best epoch under the configured selection metric.
            if self.opt.best_index == 'f1':
                if f1 >= max(self.results['F1']):
                    self.results['best_epoch'] = self.epoch
            elif self.opt.best_index == 'err':
                if err <= min(self.results['err']):
                    self.results['best_epoch'] = self.epoch

        self.load_poll_terminate()
        self.opt.TBGlobalWriter.add_scalars(
            'fold' + str(self.fold + 1) + '/loss',
            {'eval_loss': self.epoch_loss / (i + 1)}, self.step)
        self.epoch += 1
def get_transform(train, dataset_name):
    """Compose the data transform for one dataset split.

    Args:
        train: include training-time augmentation when True.
        dataset_name: which dataset (source or target domain) this is for.

    Returns:
        A T.Compose over resize/flip/crop ops, label remapping, tensor
        conversion, and mean/std normalization.
    """
    base_size = cfg.DATA_TRANSFORM.LOADSIZE
    crop_size = cfg.DATA_TRANSFORM.CROPSIZE
    ignore_label = cfg.DATASET.IGNORE_LABEL

    if dataset_name == cfg.DATASET.SOURCE:
        input_size = cfg.DATA_TRANSFORM.INPUT_SIZE_S
    else:
        input_size = cfg.DATA_TRANSFORM.INPUT_SIZE_T

    # Random-resize bounds; only the upper bound differs between train
    # and eval with the current factors.
    min_size = int((1.0 if train else 1.0) * base_size)
    max_size = int((1.3 if train else 1.0) * base_size)

    # `ops` avoids shadowing a module commonly named `transforms`.
    ops = []
    if cfg.DATA_TRANSFORM.RANDOM_RESIZE_AND_CROP:
        if train:
            ops += [
                T.RandomResize(min_size, max_size),
                T.RandomHorizontalFlip(0.5),
                T.RandomCrop(crop_size, ignore_label=ignore_label),
            ]
        else:
            ops.append(T.Resize(cfg.DATA_TRANSFORM.INPUT_SIZE_T, True))
    else:
        if train:
            ops += [T.Resize(input_size), T.RandomHorizontalFlip(0.5)]
        else:
            ops.append(T.Resize(input_size, True))

    # Remap labels into the shared source/target label space.
    mapping = get_label_map(cfg.DATASET.SOURCE, cfg.DATASET.TARGET)
    ops.append(T.LabelRemap(mapping[dataset_name]))
    ops.append(T.ToTensor(cfg.DATASET.IMG_MODE))
    if cfg.DATASET.IMG_MODE == "BGR":
        # Caffe-style BGR input: mean subtraction only, unit std.
        mean = (104.00698793, 116.66876762, 122.67891434)
        std = (1.0, 1.0, 1.0)
    else:
        mean = (0.485, 0.456, 0.406)
        std = (0.229, 0.224, 0.225)

    ops.append(T.Normalize(mean, std))
    return T.Compose(ops)
Пример #7
0
def _build_cpu_transform(opt):
    """CPU-side transform pipeline shared by the train and test loaders:
    optional RandomCrop (when opt.patch_size != '-1') followed by ToTensor."""
    cpu_transforms = []
    if opt.patch_size != '-1':
        cpu_transforms.append(_transforms.RandomCrop(opt.patch_size))
    cpu_transforms.append(_transforms.ToTensor())
    return transforms.Compose(cpu_transforms)


def create_data_loader(mode, opt, shuffle=True):
    """Create the DataLoader for the requested split.

    Args:
        mode: 'train', 'test' or 'validation'.
        opt: options namespace (dataset, batch_size, augs, patch_size,
            test_set_length, sidd_validation_root, ...).
        shuffle: passed through to the DataLoader.

    Returns:
        A DataLoader, or None when opt.dataset is not recognized.

    Raises:
        Exception: for an unknown mode.
    """
    if opt.dataset != 'SIDD_Tensor':
        print("Dataset not found.")
        return None

    if mode == 'train':
        # GPU-side augmentations are looked up by name on _transforms.
        gpu_transforms = []
        if opt.augs[0] != '-1':
            for aug in opt.augs:
                gpu_transforms.append(getattr(_transforms, aug)())

        dataset = data_sets.SIDD_Tensor(
            opt=opt,
            csv_file_name="train.txt",
            cpu_transform=_build_cpu_transform(opt),
            gpu_transform=transforms.Compose(gpu_transforms))
        batch_size = opt.batch_size

    elif mode == 'test':
        dataset = data_sets.SIDD_Tensor(
            opt=opt,
            csv_file_name="test.txt",
            cpu_transform=_build_cpu_transform(opt),
            manual_length=opt.test_set_length)
        batch_size = 1  # evaluate one image at a time

    elif mode == 'validation':
        dataset = data_sets.SIDD_Validation(
            data_root=opt.sidd_validation_root,
            gt_file_name='ValidationGtBlocksSrgb',
            noisy_file_name='ValidationNoisyBlocksSrgb')
        batch_size = 1

    else:
        raise Exception("dataloader mode incorrect")

    # All three modes share the same DataLoader configuration.
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      num_workers=4,
                      pin_memory=True)
Пример #8
0
def main():
    """Train a DeepLesion detection model, checkpointing after every epoch.

    Builds identical preprocessing pipelines for train/val/test, trains
    with SGD for a fixed number of epochs, evaluates LLF/NLF on the
    validation split, and saves the model weights each epoch.
    """
    logging.basicConfig(filename='logs' + os.sep + 'example.log',
                        level=logging.DEBUG)

    def make_transform():
        # The same preprocessing applies to every split; build a fresh
        # Compose per split so transform instances are not shared.
        return T.Compose([
            T.ToOriginalHU(INTENSITY_OFFSET),
            T.IntensityWindowing(WINDOWING),
            T.SpacingResize(NORM_SPACING, MAX_SIZE),
            T.ToTensor()
        ])

    # Previously this pipeline was copy-pasted three times.
    data_transforms = {x: make_transform() for x in ['train', 'val', 'test']}

    logging.info('Loading data sets')
    image_datasets = {
        x: DeepLesion(DIR_IN + os.sep + x, GT_FN_DICT[x], data_transforms[x])
        for x in ['train', 'val', 'test']
    }
    logging.info('data sets loaded')
    logging.info('Loading data loaders')
    dl_dataloaders = {
        x: DataLoader(image_datasets[x],
                      batch_size=3,
                      shuffle=True,
                      num_workers=0,
                      collate_fn=BatchCollator)
        for x in ['train', 'val', 'test']
    }

    logging.info('data loaders loaded\n')
    dl_dataset_sizes = {
        x: len(image_datasets[x])
        for x in ['train', 'val', 'test']
    }

    dl_model = get_model(False, True, 2)

    # Only optimize parameters that require gradients.
    params = [p for p in dl_model.parameters() if p.requires_grad]
    optimizer_ft = SGD(params, lr=0.001, momentum=0.9, weight_decay=0.0001)

    num_epochs = 10
    since = time.time()

    logging.info('momentum:' +
                 str(optimizer_ft.state_dict()['param_groups'][0]['momentum']))
    logging.info(
        'weight_decay:' +
        str(optimizer_ft.state_dict()['param_groups'][0]['weight_decay']))

    for epoch in range(num_epochs):
        logging.info('Epoch {}/{}'.format(epoch, num_epochs - 1))
        logging.info('-' * 20)
        train_one_epoc(dl_model, optimizer_ft, dl_dataloaders['train'],
                       dl_dataset_sizes['train'])

        llf, nlf = evaluate(dl_model, dl_dataloaders['val'])

        logging.info('LLF: {}'.format(llf))
        logging.info('NLF: {}'.format(nlf) + '\n')

        # Save a snapshot of the weights after every epoch (best-model
        # selection is intentionally disabled — every epoch is kept).
        best_model_wts = copy.deepcopy(dl_model.state_dict())
        torch.save(best_model_wts,
                   'saved_models' + os.sep + str(epoch) + '_deeplesion.pth')
    time_elapsed = time.time() - since
    logging.info('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
Пример #9
0
def main():
    """Train a denoising CNN on simulated-noise knee MRI data (CPU).

    Simulates k-space noise via the transform pipeline, trains DenoiseCnn
    with MSE loss, checkpoints the best-validation-loss model, and logs
    losses and example images to TensorBoard under log_dir.
    """
    print('starting denoising')

    noise_sigma = 4e-5  # sigma for the noise simulation
    batch_size = 8  # number of images to run for each minibatch
    num_epochs = 200  # number of epochs to train
    validation_seed = 15  # rng seed for validation loop
    log_dir = 'logs/denoise/'  # log dir for models and tensorboard
    device = torch.device('cpu')  # model will run on this device
    dtype = torch.float  # dtype for data and model

    # set up tensorboard
    writer = SummaryWriter(log_dir=log_dir)

    # checkpoint file name
    # BUG FIX: pass the two path components to os.path.join separately;
    # the original concatenated them first, defeating the join.
    checkpoint_file = os.path.join(log_dir, 'best_model.pt')

    # -------------------------------------------------------------------------
    # NOISE SIMULATION SETUP
    transform_list = [
        transforms.AddNoise(target_op=False, sigma=noise_sigma),
        transforms.Ifft(norm='ortho'),
        transforms.SquareRootSumSquare(),
        transforms.Normalize(),
        transforms.ToTensor(dat_complex=False, target_complex=False)
    ]

    # -------------------------------------------------------------------------
    # DATALOADER SETUP
    train_dataset = KneeDataSet('pytorch_tutorial_data/',
                                'train',
                                transform=transforms.Compose(transform_list))
    print('data set information:')
    print(train_dataset)
    val_dataset = KneeDataSet('pytorch_tutorial_data/',
                              'val',
                              transform=transforms.Compose(transform_list))
    # convert to a PyTorch dataloader
    # this handles batching, random shuffling, parallelization
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=True,
    )
    # Fixed validation sample used for the TensorBoard image panels.
    display_dat = val_dataset[15]['dat'].unsqueeze(0).to(device=device,
                                                         dtype=dtype)
    display_target = val_dataset[15]['target'].unsqueeze(0).to(device=device,
                                                               dtype=dtype)
    display_vmax = np.max(np.squeeze(display_dat.cpu().numpy()))

    # -------------------------------------------------------------------------
    # MODEL SETUP
    model = DenoiseCnn(num_chans=64,
                       num_layers=4,
                       magnitude_input=True,
                       magnitude_output=True)
    model = model.to(device)
    model = model.train()
    print('CNN model information:')
    print(model)

    # -------------------------------------------------------------------------
    # OPTIMIZER SETUP
    optimizer = torch.optim.Adam(model.parameters())
    loss_fn = torch.nn.MSELoss()

    # -------------------------------------------------------------------------
    # LOAD PREVIOUS STATE (resumes from checkpoint_file when present)
    start_epoch, model, optimizer, min_val_loss = load_checkpoint(
        checkpoint_file, model, optimizer)
    current_seed = 20

    # -------------------------------------------------------------------------
    # NETWORK TRAINING
    for epoch_index in range(start_epoch, num_epochs):
        print('epoch {} of {}'.format(epoch_index + 1, num_epochs))

        # ---------------------------------------------------------------------
        # TRAINING LOOP
        model = model.train()

        # rng seed for noise generation
        torch.manual_seed(current_seed)
        np.random.seed(current_seed)
        torch.cuda.manual_seed(current_seed)

        # batch loop
        losses = []
        for batch in train_loader:
            target = batch['target'].to(device=device, dtype=dtype)
            dat = batch['dat'].to(device=device, dtype=dtype)

            est = model(dat)  # forward propagation
            loss = loss_fn(est, target)  # calculate the loss
            optimizer.zero_grad()  # clear out old gradients
            loss.backward()  # back propagation
            optimizer.step()  # update the CNN weights

            # keep last 10 minibatches to compute training loss
            losses.append(loss.item())
            losses = losses[-10:]

        print('trailing training loss: {}'.format(np.mean(losses)))

        # ---------------------------------------------------------------------
        # EVALUATION LOOP
        model = model.eval()

        # Save the training RNG position, then reseed so validation noise
        # is identical across epochs.
        current_seed = np.random.get_state()[1][0]
        torch.manual_seed(validation_seed)
        np.random.seed(validation_seed)
        torch.cuda.manual_seed(validation_seed)

        # batch loop
        val_losses = []
        with torch.no_grad():
            for batch in val_loader:
                target = batch['target'].to(device=device, dtype=dtype)
                dat = batch['dat'].to(device=device, dtype=dtype)

                est = model(dat)
                loss = loss_fn(est, target)

                val_losses.append(loss.item())

        print('validation loss: {}'.format(np.mean(val_losses)))

        # ---------------------------------------------------------------------
        # VISUALIZATIONS AND CHECKPOINTS
        if np.mean(val_losses) < min_val_loss:
            save_checkpoint(epoch_index, model, optimizer, np.mean(val_losses),
                            checkpoint_file)

        # write the losses
        writer.add_scalar('loss/train', np.mean(losses), epoch_index + 1)
        writer.add_scalar('loss/validation', np.mean(val_losses),
                          epoch_index + 1)

        # show an example image from the validation data
        model = model.eval()
        with torch.no_grad():
            display_est = model(display_dat)

        writer.add_image('validation/dat',
                         display_dat[0] / display_vmax,
                         global_step=epoch_index + 1)
        writer.add_image('validation/cnn',
                         display_est[0] / display_vmax,
                         global_step=epoch_index + 1)
        writer.add_image('validation/target',
                         display_target[0] / display_vmax,
                         global_step=epoch_index + 1)

    writer.close()
Пример #10
0
    saver = Saver(args)
    saver.save_experiment_config()

    # Define Tensorboard Summary
    summary = TensorboardSummary(saver.experiment_dir)
    args.exp = saver.experiment_dir.split('_')[-1]

    if args.train_dataset == 'cityscapes':
        # Data
        train_trans = transforms.Compose([
            transforms.ToPILImage(),
            # transforms.RandomResizedCrop((args.image_size, args.image_size), scale=(0.2, 2)),
            transforms.Resize((args.image_size, args.image_size)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomAffine(22, scale=(0.75, 1.25)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[123.675, 116.28, 103.53],
                                 std=[58.395, 57.12, 57.375])
            # transforms.NormalizeInstance()
        ])
        val_trans = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((args.image_size, args.image_size),
                              do_mask=False),
            transforms.ToTensor(),
            transforms.Normalize(mean=[123.675, 116.28, 103.53],
                                 std=[58.395, 57.12, 57.375])
            # transforms.NormalizeInstance()
        ])

        if args.ann_type == 'comp':
Пример #11
0
def main(args):
    """Full landmark-model training entry point.

    Builds train/eval transform pipelines and datasets from args, restores
    the last checkpoint when one exists, then runs the train/eval loop for
    opt_config.epochs, saving a checkpoint and a last-info file per epoch.
    """
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    prepare_seed(args.rand_seed)
    logstr = 'seed-{:}-time-{:}'.format(args.rand_seed, time_for_file())
    logger = Logger(args.save_path, logstr)
    logger.log('Main Function with logger : {:}'.format(logger))
    logger.log('Arguments : -------------------------------')
    for name, value in args._get_kwargs():
        logger.log('{:16} : {:}'.format(name, value))
    logger.log("Python  version : {}".format(sys.version.replace('\n', ' ')))
    logger.log("Pillow  version : {}".format(PIL.__version__))
    logger.log("PyTorch version : {}".format(torch.__version__))
    logger.log("cuDNN   version : {}".format(torch.backends.cudnn.version()))

    # General Data Argumentation
    # ImageNet mean/std; mean_fill pads crops with the mean color.
    mean_fill = tuple([int(x * 255) for x in [0.485, 0.456, 0.406]])
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    assert args.arg_flip == False, 'The flip is : {}, rotate is {}'.format(args.arg_flip, args.rotate_max)
    train_transform = [transforms.PreCrop(args.pre_crop_expand)]
    train_transform += [transforms.TrainScale2WH((args.crop_width, args.crop_height))]
    train_transform += [transforms.AugScale(args.scale_prob, args.scale_min, args.scale_max)]
    # if args.arg_flip:
    #  train_transform += [transforms.AugHorizontalFlip()]
    if args.rotate_max:
        train_transform += [transforms.AugRotate(args.rotate_max)]
    train_transform += [transforms.AugCrop(args.crop_width, args.crop_height, args.crop_perturb_max, mean_fill)]
    train_transform += [transforms.ToTensor(), normalize]
    train_transform = transforms.Compose(train_transform)

    # Evaluation uses a deterministic crop/scale pipeline (no augmentation).
    eval_transform = transforms.Compose(
        [transforms.PreCrop(args.pre_crop_expand), transforms.TrainScale2WH((args.crop_width, args.crop_height)),
         transforms.ToTensor(), normalize])
    assert (args.scale_min + args.scale_max) / 2 == args.scale_eval, 'The scale is not ok : {},{} vs {}'.format(
        args.scale_min, args.scale_max, args.scale_eval)

    # Model Configure Load
    model_config = load_configure(args.model_config, logger)
    # Sigma is expressed in the evaluation scale.
    args.sigma = args.sigma * args.scale_eval
    logger.log('Real Sigma : {:}'.format(args.sigma))

    # Training Dataset
    train_data = GeneralDataset(train_transform, args.sigma, model_config.downsample, args.heatmap_type, args.data_indicator)
    train_data.load_list(args.train_lists, args.num_pts, True)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size,
                                               shuffle=True,num_workers=args.workers,
                                               pin_memory=True)
    # Evaluation Dataloader
    eval_loaders = []

    if args.eval_ilists is not None:
        for eval_ilist in args.eval_ilists:
            eval_idata = GeneralDataset(eval_transform, args.sigma, model_config.downsample, args.heatmap_type,
                                 args.data_indicator)
            eval_idata.load_list(eval_ilist, args.num_pts, True)
            eval_iloader = torch.utils.data.DataLoader(eval_idata, batch_size=args.batch_size, shuffle=False,
                                                       num_workers=args.workers, pin_memory=True)
            # The boolean flags whether this loader holds video data.
            eval_loaders.append((eval_iloader, False))

    # Define network
    logger.log('configure : {:}'.format(model_config))
    # num_pts + 1: one extra channel for the background heatmap.
    net = obtain_model(model_config, args.num_pts + 1)

    assert model_config.downsample == net.downsample, 'downsample is not correct : {} vs {}'.format(
        model_config.downsample, net.downsample)
    logger.log("=> network :\n {}".format(net))

    logger.log('Training-data : {:}'.format(train_data))
    for i, eval_loader in enumerate(eval_loaders):
        eval_loader, is_video = eval_loader
        logger.log('The [{:2d}/{:2d}]-th testing-data [{:}] = {:}'.format(i, len(eval_loaders),
                                                                          'video' if is_video else 'image',
                                                                          eval_loader.dataset))
    logger.log('arguments : {:}'.format(args))
    opt_config = load_configure(args.opt_config, logger)

    if hasattr(net, 'specify_parameter'):
        net_param_dict = net.specify_parameter(opt_config.LR, opt_config.Decay)
    else:
        net_param_dict = net.parameters()

    optimizer, scheduler, criterion = obtain_optimizer(net_param_dict, opt_config, logger)
    logger.log('criterion : {:}'.format(criterion))
    net, criterion = net.cuda(), criterion.cuda()
    net = torch.nn.DataParallel(net)

    # Resume from the last checkpoint when a last-info file exists.
    last_info = logger.last_info()
    if last_info.exists():
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(last_info))
        last_info = torch.load(str(last_info))
        start_epoch = last_info['epoch'] + 1
        checkpoint = torch.load(last_info['last_checkpoint'])
        assert last_info['epoch'] == checkpoint['epoch'], 'Last-Info is not right {:} vs {:}'.format(last_info,
                                                                                                     checkpoint[
                                                                                                         'epoch'])
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        logger.log("=> load-ok checkpoint '{:}' (epoch {:}) done".format(logger.last_info(), checkpoint['epoch']))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch = 0

    if args.eval_once:
        logger.log("=> only evaluate the model once")
        eval_results = eval_all(args, eval_loaders, net, criterion, 'eval-once', logger, opt_config)
        logger.close()
        return

        # Main Training and Evaluation Loop
    start_time = time.time()
    epoch_time = AverageMeter()
    for epoch in range(start_epoch, opt_config.epochs):
        # NOTE(review): scheduler.step() before training follows the
        # pre-1.1 PyTorch ordering — presumably intentional for this
        # codebase's torch version; confirm before upgrading.
        scheduler.step()
        need_time = convert_secs2time(epoch_time.avg * (opt_config.epochs - epoch), True)
        epoch_str = 'epoch-{:03d}-{:03d}'.format(epoch, opt_config.epochs)
        LRs = scheduler.get_lr()
        logger.log('\n==>>{:s} [{:s}], [{:s}], LR : [{:.5f} ~ {:.5f}], Config : {:}'.format(time_string(), epoch_str,
                                                                                            need_time, min(LRs),
                                                                                            max(LRs), opt_config))

        # train for one epoch
        train_loss, train_nme = train(args, train_loader, net, criterion,
                                      optimizer, epoch_str, logger, opt_config)
        # log the results
        logger.log(
            '==>>{:s} Train [{:}] Average Loss = {:.6f}, NME = {:.2f}'.format(time_string(), epoch_str, train_loss,
                                                                              train_nme * 100))

        # remember best prec@1 and save checkpoint
        save_path = save_checkpoint({
            'epoch': epoch,
            'args': deepcopy(args),
            'arch': model_config.arch,
            'state_dict': net.state_dict(),
            'scheduler': scheduler.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, str(logger.path('model') / '{:}-{:}.pth'.format(model_config.arch, epoch_str)), logger)

        # The last-info file points at the newest checkpoint for resuming.
        last_info = save_checkpoint({
            'epoch': epoch,
            'last_checkpoint': save_path,
        }, str(logger.last_info()), logger)

        eval_results = eval_all(args, eval_loaders, net, criterion, epoch_str, logger, opt_config)

        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()

    logger.close()
Пример #12
0
    def dann_train(self,
                   signals,
                   labels,
                   src_sequences,
                   dst_sequences,
                   beta=1.0,
                   stable=False):
        """Run one DANN (domain-adversarial) training epoch.

        Each iteration draws a labeled source batch from self.queue and an
        unlabeled destination batch from a separate domain queue, then
        optimizes class loss on source plus domain losses on both, with the
        gradient-reversal strength alpha scheduled from training progress.

        Args:
            signals, labels: full training arrays handed to the loaders.
            src_sequences: sample indices for the source domain.
            dst_sequences: sample indices for the destination domain.
            beta: upper bound / scale for the adversarial weight alpha.
            stable: when True, use alpha = beta constantly instead of the
                sigmoid ramp-up schedule.
        """
        self.epoch_forward_init(signals, labels, src_sequences, True)
        dst_signals_len = len(dst_sequences)
        domain_queue = Queue(self.opt.load_thread)
        self.domain_random_loader_init(signals, labels, dst_sequences,
                                       domain_queue)
        # Per-sample domain ids used when training with > 2 domains.
        domains = np.load(os.path.join(self.opt.dataset_dir, 'domains.npy'))
        domains = dataloader.rebuild_domain(domains)
        # Accumulators: [src class loss, src domain loss, dst domain loss].
        loss_show = [0, 0, 0]

        for i in range(self.epoch_iter_length):
            self.step = float(i / self.epoch_iter_length + self.epoch)
            # p in [0, 1): overall training progress for the alpha ramp.
            p = self.step / self.opt.epochs
            if stable: alpha = beta
            else: alpha = beta * (2. / (1. + np.exp(-10 * p)) - 1)
            self.optimizer.zero_grad()
            # src
            s_signal, s_label, sequence = self.queue.get()
            this_batch_len = s_signal.shape[0]
            s_signal, s_label = transforms.ToTensor(s_signal,
                                                    s_label,
                                                    gpu_id=self.opt.gpu_id)
            if self.opt.domain_num == 2:
                # Two-domain case: source is domain 0, destination domain 1.
                s_domain = transforms.ToTensor(None,
                                               np.zeros(this_batch_len,
                                                        dtype=np.int64),
                                               gpu_id=self.opt.gpu_id)
            else:
                s_domain = transforms.batch_generator(None, domains, sequence)
                s_domain = transforms.ToTensor(None,
                                               s_domain,
                                               gpu_id=self.opt.gpu_id)
            # dst (truncated to the source batch length so sizes match)
            d_signal, _, sequence = domain_queue.get()
            d_signal = transforms.ToTensor(d_signal[:this_batch_len],
                                           None,
                                           gpu_id=self.opt.gpu_id)
            if self.opt.domain_num == 2:
                d_domain = transforms.ToTensor(None,
                                               np.ones(this_batch_len,
                                                       dtype=np.int64),
                                               gpu_id=self.opt.gpu_id)
            else:
                d_domain = transforms.batch_generator(
                    None, domains, sequence[:this_batch_len])
                d_domain = transforms.ToTensor(None,
                                               d_domain,
                                               gpu_id=self.opt.gpu_id)

            # Combined loss: source classification + both domain heads.
            class_output, domain_output = self.net(s_signal, alpha=alpha)
            self.add_label_to_confusion_mat(s_label, class_output, False)
            loss_s_label = self.loss_dann_c(class_output, s_label)
            loss_s_domain = self.loss_dann_d(domain_output, s_domain)
            _, domain_output = self.net(d_signal, alpha=alpha)
            loss_d_domain = self.loss_dann_d(domain_output, d_domain)
            loss = loss_s_label + loss_s_domain + loss_d_domain
            loss_show[0] += loss_s_label.item()
            loss_show[1] += loss_s_domain.item()
            loss_show[2] += loss_d_domain.item()

            loss.backward()
            self.optimizer.step()

        self.add_class_acc_to_tensorboard('train')
        # NOTE(review): reuses `i` from the loop above — assumes
        # epoch_iter_length >= 1.
        self.opt.TBGlobalWriter.add_scalars(
            'fold' + str(self.fold + 1) + '/loss', {
                'src_label': loss_show[0] / (i + 1),
                'src_domain': loss_show[1] / (i + 1),
                'dst_domain': loss_show[2] / (i + 1)
            }, self.step)
Пример #13
0
'''
@hypox64
2020/04/03
'''
# Simple test script: load a pretrained 1-D ResNet classifier and report
# per-sample predictions against the ground-truth labels.
opt = options.Options().getparse()
net, exp = creatnet.creatnet(opt)

# Load data.
signals = np.load('./datasets/simple_test/signals.npy')
labels = np.load('./datasets/simple_test/labels.npy')

# Load pretrained model.
net.load_state_dict(torch.load('./checkpoints/pretrained/micro_multi_scale_resnet_1d_50class.pth'))
net.eval()
# BUG FIX: the original referenced `self.opt`/`self.net` at module level,
# which raises NameError in a plain script — use the module-level
# `opt`/`net` instead.
if opt.gpu_id != '-1' and len(opt.gpu_id) == 1:
    net.cuda()
elif opt.gpu_id != '-1' and len(opt.gpu_id) > 1:
    # Multiple GPU ids: wrap the model for data-parallel execution.
    net = nn.DataParallel(net)
    net.cuda()

for signal, true_label in zip(signals, labels):
    signal = signal.reshape(1, 1, -1).astype(np.float32)  # batchsize, ch, length
    true_label = true_label.reshape(1).astype(np.int64)  # batchsize
    signal, true_label = transforms.ToTensor(signal, true_label, gpu_id=opt.gpu_id)
    out = net(signal)
    pred_label = torch.max(out, 1)[1]
    pred_label = pred_label.data.cpu().numpy()
    true_label = true_label.data.cpu().numpy()
    print(("true:{0:d} predict:{1:d}").format(true_label[0], pred_label[0]))