Ejemplo n.º 1
0
    def run(self, dataloader):
        """Run one epoch over *dataloader* and return the accumulated logs.

        The loss and every metric in ``self.metrics`` are averaged across
        batches with ``AverageValueMeter``; when ``self.verbose`` is set,
        running values are shown in a tqdm progress bar.
        """
        self.on_epoch_start()

        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {m.__name__: AverageValueMeter() for m in self.metrics}

        show_progress = self.verbose
        with tqdm(dataloader,
                  desc=self.stage_name,
                  file=sys.stdout,
                  disable=not show_progress) as progress:
            for x, y in progress:
                x = x.to(self.device)
                y = y.to(self.device)
                loss, y_pred = self.batch_update(x, y)

                # running average of the loss
                loss_meter.add(loss.cpu().detach().numpy())
                logs[self.loss.__name__] = loss_meter.mean

                # running average of each metric
                for metric_fn in self.metrics:
                    value = metric_fn(y_pred, y).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(value)
                for name, meter in metrics_meters.items():
                    logs[name] = meter.mean

                if show_progress:
                    progress.set_postfix_str(self._format_logs(logs))

        return logs
Ejemplo n.º 2
0
    def __init__(self, faster_rcnn, attacker=None, attack_mode=False):
        """Trainer wrapping a (victim) Faster R-CNN with an optional attacker.

        Args:
            faster_rcnn: the detection network to train.
            attacker: optional adversarial attacker used during training.
            attack_mode: whether training runs in attack mode.
        """
        super(VictimFasterRCNNTrainer, self).__init__()

        self.faster_rcnn = faster_rcnn
        self.attacker = attacker
        # sigmas for the smooth-L1 localisation losses (read from global opt)
        self.rpn_sigma = opt.rpn_sigma
        self.roi_sigma = opt.roi_sigma
        self.attack_mode = attack_mode

        # target creator create gt_bbox gt_label etc as training targets.
        self.anchor_target_creator = AnchorTargetCreator()
        self.proposal_target_creator = ProposalTargetCreator()

        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        self.loc_normalize_std = faster_rcnn.loc_normalize_std

        self.optimizer = self.faster_rcnn.get_optimizer()
        # visdom wrapper
        self.vis = Visualizer(env=opt.env)

        # indicators for training status
        self.rpn_cm = ConfusionMeter(2)  # RPN is binary: foreground/background
        self.roi_cm = ConfusionMeter(2)
        self.meters = {k: AverageValueMeter()
                       for k in LossTuple._fields}  # average loss
        self.adv_meters = {
            k: AverageValueMeter()
            for k in LossTupleAdv._fields
        }  # average adversarial loss
Ejemplo n.º 3
0
 def __init__(self):
     """Build the COCO YOLOv2 backbone, its optimizer and the anchor tensor."""
     self.yolov2 = BackboneCOCO().to(opt.device)
     self.optimizer = self.init_optimizer()
     # parse_anchors returns an np.ndarray; converted to a device tensor here
     self.anchors = torch.from_numpy(parse_anchors(opt.anchors_path)).to(opt.device)
     self.loss_meter = AverageValueMeter()
     self.logger = opt.logger
Ejemplo n.º 4
0
    def __init__(self, r_fcn: RFCN):
        """Trainer for an R-FCN detector.

        Args:
            r_fcn: the R-FCN network to train.
        """
        super(RFCN_Trainer, self).__init__()

        self.r_fcn = r_fcn
        # sigmas for the smooth-L1 localisation losses (hyper-parameters)
        self.rpn_sigma = opt.rpn_sigma
        self.roi_sigma = opt.roi_sigma

        # generate anchor for RPN training
        self.anchor_target_creator = AnchorTargetCreator()

        # OHEM keeps more proposals so that hard examples can be mined later
        proposal_target_num = 300 if opt.use_OHEM else 128
        self.proposal_target_creator = ProposalTargetCreator(n_sample=proposal_target_num)

        self.loc_normalize_mean = r_fcn.loc_normalize_mean
        self.loc_normalize_std = r_fcn.loc_normalize_std

        self.optimizer = self.get_optimizer()

        # visdom wrapper
        self.viz = visdom.Visdom(env=opt.viz_env)
        self.viz_index = 0
        self.log_text = ''

        # record training status
        self.rpn_cm = ConfusionMeter(2)  # binary: foreground/background
        self.roi_cm = ConfusionMeter(self.r_fcn.class_num)
        # when the head is frozen only RPN losses are tracked
        if opt.FIX_HEAD:
            self.meters = {k: AverageValueMeter() for k in RPN_LossTuple._fields}
        else:
            self.meters = {k: AverageValueMeter() for k in RFCN_LossTuple._fields}
    def __init__(self,
                 faster_rcnn,
                 attacker=None,
                 layer_idx=None,
                 attack_mode=False):
        """Trainer for BR Faster R-CNN training with an optional attacker.

        Args:
            faster_rcnn: detection network to train.
            attacker: optional adversarial attacker.
            layer_idx: stored index — presumably the feature layer the
                attacker targets; confirm against the attacker's usage.
            attack_mode: whether training runs in attack mode.
        """
        super(BRFasterRcnnTrainer, self).__init__()

        self.faster_rcnn = faster_rcnn
        self.attacker = attacker
        self.layer_idx = layer_idx
        # sigmas for the smooth-L1 localisation losses (read from global opt)
        self.rpn_sigma = opt.rpn_sigma
        self.roi_sigma = opt.roi_sigma
        self.attack_mode = attack_mode

        self.anchor_target_creator = AnchorTargetCreator()
        self.proposal_target_creator = ProposalTargetCreator()

        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        self.loc_normalize_std = faster_rcnn.loc_normalize_std

        self.optimizer = self.faster_rcnn.get_optimizer()

        # visdom wrapper
        self.vis = Visualizer(env=opt.env)

        # confusion matrices: RPN is binary, RoI head has 21 classes (20 + bg)
        self.rpn_cm = ConfusionMeter(2)
        self.roi_cm = ConfusionMeter(21)
        self.meters = {k: AverageValueMeter() for k in LossTuple._fields}
        self.BR_meters = {k: AverageValueMeter() for k in LossTupleBR._fields}
Ejemplo n.º 6
0
 def __init__(self, args, model):
     """Metering helper: two loss meters plus an optional visdom connection.

     Visdom is created only when ``args.vis_steps > 0``.
     """
     self.name = args.name
     self.model = model
     # visdom window handles (None until assigned elsewhere)
     self.l1win = None
     self.l2win = None
     self.l1meter = AverageValueMeter()
     self.l2meter = AverageValueMeter()
     self.visdom = Visdom(
         port=args.vis_port) if args.vis_steps > 0 else None
Ejemplo n.º 7
0
 def __init__(self):
     """Build the DarkNet53 backbone, optimizer, anchors, loss layer and meters."""
     self.darknet53 = DarkNet53().to(opt.device)
     self.optimizer = self.init_optimizer()
     self.anchors = parse_anchors(opt.anchors_path)
     self.loss_layer = LossLayer(self.anchors)
     self.meter = AverageValueMeter()
     self.loss_dict = defaultdict(dict)
     self.img_size = opt.img_size
     # cosine-annealed learning rate, period T_max=5, decaying to eta_min=0
     self.scheduler = CosineAnnealingLR(self.optimizer, T_max=5, eta_min=0.)
Ejemplo n.º 8
0
    def run(self, dataloader):
        """Run one epoch, mirroring loss/metric scalars to tensorboard.

        Uses the module-level ``exp_writer`` for logging; every 30 batches
        (tracked by ``self.cnt``) an input/target/prediction triple is
        written as images. Returns the dict of running averages.
        """
        global exp_writer

        self.on_epoch_start()

        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {
            metric.__name__: AverageValueMeter()
            for metric in self.metrics
        }

        with tqdm(dataloader,
                  desc=self.stage_name,
                  file=sys.stdout,
                  disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                x, y = x.to(self.device), y.to(self.device)
                loss, y_pred = self.batch_update(x, y)

                # periodically dump an image triple to tensorboard
                if self.cnt % 30 == 0:
                    exp_writer.write_image_to_tensorboard(
                        x.detach().cpu(),
                        y.detach().cpu().numpy(),
                        y_pred.detach().cpu())

                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)

                # per-batch (not averaged) loss scalar
                exp_writer.sw.add_scalar(
                    self.stage_name + '/loss_' + self.loss.__name__,
                    loss_value, self.cnt)

                # update metrics logs
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y).cpu().detach().numpy()

                    exp_writer.sw.add_scalar(
                        self.stage_name + '/metric_' + metric_fn.__name__,
                        metric_value, self.cnt)

                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}

                logs.update(metrics_logs)

                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)

                # global step counter shared across epochs
                self.cnt += 1

        return logs
Ejemplo n.º 9
0
def val():
    """Validate on ``val_loader`` and checkpoint the model on improvement.

    Relies on module-level globals: ``val_loader``, ``net``, ``dice_loss``,
    ``pred2segmentation``, ``color_transform``, ``board_val_image``,
    ``board_loss`` and ``highest_dice_loss``.

    NOTE(review): despite the name, ``highest_dice_loss`` is treated as a
    best *score* — the model is saved when the averaged value increases;
    confirm the metric's direction.
    """
    global highest_dice_loss
    dice_loss_meter = AverageValueMeter()
    dice_loss_meter.reset()
    for i, (img, mask, weak_mask, _) in enumerate(val_loader):
        # skip samples with (almost) empty masks
        if (weak_mask.sum() <= 3) or (mask.sum() <= 10):
            # print('No mask has been found')
            continue
        # skip samples whose spatial sizes disagree
        if not ((list(img.shape[-2:]) == list(mask.shape[-2:])) and (
                list(img.shape[-2:]) == list(weak_mask.shape[-2:]))):
            continue
        img, mask, weak_mask = img.cuda(), mask.cuda(), weak_mask.cuda()

        predict_ = F.softmax(net(img), dim=1)
        segm = pred2segmentation(predict_)
        # symmetric dice: average of foreground and background dice values
        diceloss_F = dice_loss(segm, mask)
        diceloss_B = dice_loss(1 - segm, 1 - mask)
        dice_loss_meter.add((diceloss_F + diceloss_B).item() / 2)

        # show a sample triple every 100 batches
        if i % 100 == 0:
            board_val_image.image(img[0], 'medical image')
            board_val_image.image(color_transform(weak_mask[0]), 'weak_mask')
            board_val_image.image(color_transform(segm[0]), 'prediction')
    board_loss.plot('dice_loss for validationset', dice_loss_meter.value()[0])

    if dice_loss_meter.value()[0] > highest_dice_loss:
        highest_dice_loss = dice_loss_meter.value()[0]
        torch.save(net.state_dict(), 'Enet_Square_barrier.pth')
        print('saved with dice:%f' % highest_dice_loss)
Ejemplo n.º 10
0
def validate(val_loader, model, criterion, opt):
    """Evaluate *model* on *val_loader* with a joint cls + regression loss.

    Args:
        val_loader: yields (win_past, inp_pred, labels, min_values) batches.
        model: callable (or list of modules, all set to eval) taking
            (win_past, inp_pred) and returning (class_logits, regression).
        criterion: pair [classification_criterion, regression_criterion].
        opt: dict with 'g' (GPU id) and 'lambda' (regression loss weight).

    Returns:
        dict with the averaged 'loss' and 'top1' classification error
        (also stored in ``ctx.metrics``).

    Fixes over the previous revision: undefined names (``val_loaders``,
    ``basik_tasks_model``, ``win_pred``, ``output``, ``targets``,
    ``ctx.losses``), dict-style indexing of the plain AverageValueMeter,
    and the unused ``data_time``/``end`` timing leftovers.
    """
    losses = AverageValueMeter()
    errors = ClassErrorMeter(topk=[1])

    # switch to evaluate mode
    if isinstance(model, list):
        for m in model:
            m.eval()
    else:
        model.eval()

    # defaults in case the loader is empty
    loss = float('nan')
    top1 = float('nan')
    with torch.no_grad():
        for win_past, inp_pred, labels, min_values in val_loader:
            win_past = win_past.cuda(opt['g'], non_blocking=True)
            inp_pred = inp_pred.cuda(opt['g'], non_blocking=True)
            labels = labels.cuda(opt['g'], non_blocking=True)
            min_values = min_values.cuda(opt['g'], non_blocking=True)

            # compute output: classification logits and regression estimate
            yh_cl, yh_reg = model(win_past, inp_pred)

            # joint loss: classification + weighted regression
            total = criterion[0](yh_cl, labels)
            total = total + opt['lambda'] * criterion[1](yh_reg, min_values)

            # measure accuracy and record loss
            errors.add(yh_cl, labels)
            losses.add(total.item())

            loss = losses.value()[0]
            top1 = errors.value()[0]

        print('Loss {loss:.4f}'
              ' * Err@1 {top1:.3f}\t'
              .format(loss=loss, top1=top1))
    stats = {'loss': loss, 'top1': top1}
    ctx.metrics = stats
    return stats
Ejemplo n.º 11
0
 def validate(self):
     """Compute the mean reconstruction loss over the test set (no grads).

     Stores the meter on ``self.reconstruction_loss_meter_val`` and logs
     its mean; note the log label says "Acc" but the quantity logged is
     the reconstruction loss.
     """
     self.model.eval()
     self.reconstruction_loss_meter_val = AverageValueMeter()
     with torch.no_grad():
         for x, targets in self.test_dataloader:
             self.x, self.targets = Variable(x).to(
                 self.device), Variable(targets).to(self.device)
             z_mean, z_log_var, targets_reconstructed = self.model(
                 self.targets, self.x)
             self.reconstruction_loss_meter_val.add(
                 self.reconstruction_loss(targets_reconstructed,
                                          self.targets).detach().cpu())
         logging.info(f"Acc: {self.reconstruction_loss_meter_val.mean}")
class CalculateLossCallback(TrainingCallback):
    """Track the batch-size-weighted running average of the loss under *key*."""

    def __init__(self, key):
        self.key = key
        self.average_value_meter = AverageValueMeter()

    def on_mode_begin(self, mode, log):
        # start each mode (e.g. train/val) from a clean average
        self.average_value_meter.reset()
        log[self.key] = float('NaN')

    def on_batch_end(self, batch, log):
        # weight each batch's loss by its size so the mean is per-sample
        n = log['batch_size']
        self.average_value_meter.add(log['loss'] * n, n)
        log[self.key] = self.average_value_meter.value()[0]
Ejemplo n.º 13
0
    def __init__(self, faster_rcnn):
        """Trainer with manually tuned anchor/proposal target creators.

        Args:
            faster_rcnn: the detection network to train.
        """
        super(FasterRCNNTrainer, self).__init__()

        self.faster_rcnn = faster_rcnn
        self.rpn_sigma = opt.rpn_sigma
        self.roi_sigma = opt.roi_sigma
        # presumably penalty weights for the rpn/roi losses — confirm at use site
        self.rpn_pen = opt.rpn_pen
        self.roi_pen = opt.roi_pen

        # target creator create gt_bbox gt_label etc as training targets.
        # FLAG: add params
        # Initail best: pos 0.2, neg 0.1
        self.anchor_target_creator = AnchorTargetCreator(pos_ratio=0.5,
                                                         pos_iou_thresh=0.7,
                                                         neg_iou_thresh=0.3)
        # Initial best: pos 0.2, neg 0.2
        self.proposal_target_creator = ProposalTargetCreator(pos_ratio=0.5,
                                                             pos_iou_thresh=0.5,
                                                             neg_iou_thresh_hi=0.5)

        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        self.loc_normalize_std = faster_rcnn.loc_normalize_std

        self.optimizer = self.faster_rcnn.get_optimizer()
        # visdom wrapper
        self.vis = Visualizer(env=opt.env)

        # indicators for training status
        self.rpn_cm = ConfusionMeter(2)  # binary: foreground/background
        self.roi_cm = ConfusionMeter(4)  # 4-class RoI confusion matrix
        self.meters = {k: AverageValueMeter() for k in LossTuple._fields}  # average loss
Ejemplo n.º 14
0
    def __init__(self, args, dataset_len, phase=None):
        """Logger for GAN training: tracks generator/discriminator losses.

        Args:
            args: run configuration (num_epochs, split, device, cadences).
            dataset_len: passed through to the base logger.
            phase: optional tag suffix marking the training phase.
        """
        super(TrainLogger, self).__init__(args, dataset_len)

        # Tag suffix used for indicating training phase in loss + viz
        self.tag_suffix = phase

        self.num_epochs = args.num_epochs
        self.split = args.split

        self.gen_loss_meter = AverageValueMeter()
        self.disc_loss_meter = AverageValueMeter()

        self.device = args.device
        # global iteration counter
        self.iter = 0
        self.steps_per_print = args.steps_per_print
        self.steps_per_visual = args.steps_per_visual
Ejemplo n.º 15
0
    def __init__(self, faster_rcnn):
        """Trainer wrapping a Faster R-CNN with losses, meters and visdom."""
        # initialise the parent module
        super(FasterRCNNTrainer, self).__init__()

        self.faster_rcnn = faster_rcnn
        self.rpn_sigma = opt.rpn_sigma
        self.roi_sigma = opt.roi_sigma  # hyper-param used in _faster_rcnn_loc_loss for the localisation loss

        # target creator create gt_bbox gt_label etc as training targets.
        # picks 256 of ~20000 candidate anchors for binary classification and
        # box regression, i.e. supplies the ground truth for the RPN's outputs
        self.anchor_target_creator = AnchorTargetCreator()
        # AnchorTargetCreator and ProposalTargetCreator build training targets
        # (ground truth) and are used only during training; ProposalCreator
        # (inside the RPN) produces RoIs for Fast R-CNN in both training and
        # test. At test time ~300 RoIs come in directly, while training
        # additionally involves AnchorTargetCreator.
        self.proposal_target_creator = ProposalTargetCreator()
        # (0., 0., 0., 0.)
        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        # (0.1, 0.1, 0.2, 0.2)
        self.loc_normalize_std = faster_rcnn.loc_normalize_std

        self.optimizer = self.faster_rcnn.get_optimizer()  # SGD
        # visdom wrapper
        self.vis = Visualizer(env=opt.env)  # visualisation tool

        # indicators for training status
        # confusion matrices comparing predictions to ground truth; the
        # argument is the number of classes
        self.rpn_cm = ConfusionMeter(2)
        # 21 = 20 object classes + background
        self.roi_cm = ConfusionMeter(21)
        self.meters = {k: AverageValueMeter()
                       for k in LossTuple._fields}  # average loss
Ejemplo n.º 16
0
    def __init__(self, faster_rcnn):
        """Trainer wrapping a Faster R-CNN with losses, meters and visdom."""
        # initialise the parent module
        super(FasterRCNNTrainer, self).__init__()

        self.faster_rcnn = faster_rcnn
        # the next two values are hyper-parameters used in
        # _faster_rcnn_loc_loss to compute the localisation loss
        self.rpn_sigma = opt.rpn_sigma
        self.roi_sigma = opt.roi_sigma

        # target creator create gt_bbox gt_label etc as training targets.
        # picks 256 of ~20000 candidate anchors for binary classification and
        # box regression, i.e. supplies the ground truth for the RPN's
        # predicted locations and classes
        self.anchor_target_creator = AnchorTargetCreator()
        # AnchorTargetCreator and ProposalTargetCreator build the training
        # targets (ground truth) and are used only during training;
        # ProposalCreator (inside the RPN) produces RoIs for Fast R-CNN in
        # both training and test. At test time ~300 RoIs come in directly,
        # while training additionally involves AnchorTargetCreator.
        self.proposal_target_creator = ProposalTargetCreator()
        # (0., 0., 0., 0.)
        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        # (0.1, 0.1, 0.2, 0.2)
        self.loc_normalize_std = faster_rcnn.loc_normalize_std
        # SGD
        self.optimizer = self.faster_rcnn.get_optimizer()
        # visualisation, see vis_tool.py
        self.vis = Visualizer(env=opt.env)

        # confusion matrix comparing predictions to ground truth;
        # the argument (2) is the number of classes
        self.rpn_cm = ConfusionMeter(2)
        # the RoI head has 21 classes (20 object classes + 1 background)
        self.roi_cm = ConfusionMeter(21)
        # average loss per loss term
        self.meters = {k: AverageValueMeter()
                       for k in LossTuple._fields}  # average loss
Ejemplo n.º 17
0
    def __init__(self, faster_rcnn):
        """Trainer for Faster R-CNN: target creators, meters and visdom."""
        super(FasterRCNNTrainer, self).__init__()

        self.faster_rcnn = faster_rcnn
        self.rpn_sigma = opt.rpn_sigma  # hyper-param used in _faster_rcnn_loc_loss for the localisation loss
        self.roi_sigma = opt.roi_sigma

        self.anchor_target_creator = AnchorTargetCreator(
        )  # picks 256 of tens of thousands of anchors to train the RPN (at most 128 positives)
        self.proposal_target_creator = ProposalTargetCreator(
        )  # picks 128 of the RPN's 2000 proposals to train the RoI head (at most 32 positives)

        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        self.loc_normalize_std = faster_rcnn.loc_normalize_std

        self.optimizer = self.faster_rcnn.get_optimizer()
        # visualisation
        self.vis = Visualizer(env=opt.env)

        # accuracy of predictions against ground truth
        self.rpn_cm = ConfusionMeter(
            2)  # confusion matrix; the argument is the number of classes
        self.roi_cm = ConfusionMeter(opt.class_num + 1)
        self.meters = {k: AverageValueMeter()
                       for k in LossTuple._fields}  # average loss per term
Ejemplo n.º 18
0
    def __init__(self, faster_rcnn):
        """Trainer for Faster R-CNN: target creators, meters and visdom."""
        super(FasterRCNNTrainer, self).__init__()

        self.faster_rcnn = faster_rcnn
        # hyper-params used in faster_rcnn_loc_loss for the localisation loss
        self.rpn_sigma = opt.rpn_sigma
        self.roi_sigma = opt.roi_sigma

        # target creator create gt_bbox gt_label etc as training targets.
        # picks 256 of ~20000 candidate anchors for binary classification and
        # box regression, used to train the RPN
        self.anchor_target_creator = AnchorTargetCreator()
        # picks 128 of the 2000 filtered RoIs to train the RoI head
        self.proposal_target_creator = ProposalTargetCreator()
        # mean/std for the location targets, which are normalised before
        # being fed to the network
        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        self.loc_normalize_std = faster_rcnn.loc_normalize_std

        self.optimizer = self.faster_rcnn.get_optimizer()
        # visdom wrapper
        self.vis = Visualizer(env=opt.env)

        # indicators for training status
        self.rpn_cm = ConfusionMeter(2)  # binary: foreground/background
        self.roi_cm = ConfusionMeter(21)  # 20 object classes + background
        self.meters = {k: AverageValueMeter()
                       for k in LossTuple._fields}  # average loss
Ejemplo n.º 19
0
    def __init__(self, faster_rcnn):
        """*faster_rcnn* is a subnetwork inheriting from the Faster R-CNN base class."""
        super(FasterRCNNTrainer, self).__init__()
        self.faster_rcnn = faster_rcnn

        # true offsets of anchors w.r.t. ground-truth boxes + fg/bg labels
        self.anchor_target_creator = AnchorTargetCreator()
        # true offsets of proposals w.r.t. ground-truth boxes + class labels
        self.proposal_target_creator = ProposalTargetCreator()

        # mean and standard deviation for location estimates
        self.loc_normalize_mean = self.faster_rcnn.loc_normalize_mean
        self.loc_normalize_std = self.faster_rcnn.loc_normalize_std

        # optimizer
        self.optimizer = self.faster_rcnn.get_optimizer()

        # hyper-parameters for the loss computation
        self.rpn_sigma = OPT.rpn_sigma
        self.roi_sigma = OPT.roi_sigma

        # evaluation metrics tracked during training
        # confusion matrix for the RPN stage
        self.rpn_cm = ConfusionMeter(2)  # only foreground and background
        # confusion matrix for the Fast R-CNN stage
        self.roi_cm = ConfusionMeter(OPT.n_fg_class + 1)  # foreground classes + background
        # loss functions -- average loss
        # each loss term is averaged with an AverageValueMeter
        self.meters = {k: AverageValueMeter() for k in LossTuple._fields}
Ejemplo n.º 20
0
    def __init__(self, faster_rcnn):
        """
        :type faster_rcnn: FasterRCNN
        """
        super(FasterRCNNTrainer, self).__init__()

        self.faster_rcnn = faster_rcnn  # the detection module

        # sigma parameters for the rpn and roi losses
        self.rpn_sigma = faster_rcnn_config.rpn_sigma
        self.roi_sigma = faster_rcnn_config.roi_sigma

        # target creator create gt_bbox gt_label etc as training targets.
        self.anchor_target_creator = AnchorTargetCreator()
        self.proposal_target_creator = ProposalTargetCreator()

        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        self.loc_normalize_std = faster_rcnn.loc_normalize_std

        self.optimizer = self.faster_rcnn.get_optimizer()

        # indicators for training status
        self.rpn_cm = ConfusionMeter(2)  # binary: foreground/background
        self.roi_cm = ConfusionMeter(21)  # 20 object classes + background
        self.meters = {k: AverageValueMeter()
                       for k in LossTuple._fields}  # average loss
Ejemplo n.º 21
0
    def __init__(self, faster_rcnn):
        super(FasterRCNNTrainer, self).__init__()
        # the argument is a FasterRCNNVGG16 model, which inherits from the
        # FasterRCNN model, so what is initialised here is a FasterRCNN
        # (FasterRCNN is the parent class, FasterRCNNVGG16 the subclass)
        self.faster_rcnn = faster_rcnn
        # sigma for l1_smooth_loss
        self.rpn_sigma = opt.rpn_sigma
        self.roi_sigma = opt.roi_sigma

        # target creator create gt_bbox gt_label etc as training targets.
        # the target creators produce the real bboxes, class labels, etc.;
        # ground-truth bboxes are assigned to anchors
        self.anchor_target_creator = AnchorTargetCreator()
        self.proposal_target_creator = ProposalTargetCreator()
        # normalisation mean and std for box locations from the network
        self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
        self.loc_normalize_std = faster_rcnn.loc_normalize_std

        # optimizer of the faster rcnn network
        self.optimizer = self.faster_rcnn.get_optimizer()
        # visdom wrapper
        self.vis = Visualizer(env=opt.env)

        # indicators for training status
        # two confusion matrices: 2x2 (foreground/background) and
        # 21x21 (20 object classes + background)
        self.rpn_cm = ConfusionMeter(2)
        self.roi_cm = ConfusionMeter(21)
        self.meters = {k: AverageValueMeter()
                       for k in LossTuple._fields}  # average loss
Ejemplo n.º 22
0
    def __init__(self):
        """Build the YOLOv1 model, optimizer, datasets and data loaders."""
        super(YoloV1Trainer, self).__init__()
        if opt.gpu_available:
            device = 'cuda'
        else:
            device = 'cpu'
        # self.yolo: [batch_size, 7*7*30]
        self.yolo = YoloV1(opt.img_size).to(device)
        self.optimizer = self.get_optimizer()
        # epochs already covered by loaded pretrained weights (if any)
        self.before_epoch_num = self.use_pretrain()

        self.dataset = VocDataset(is_train=True)
        self.testset = VocDataset(is_train=False)
        # NOTE(review): the training loader uses shuffle=False — confirm intended
        self.dataset_loader = DataLoader(self.dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.num_workers)
        self.testset_loader = DataLoader(self.testset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.num_workers)
        self.loss_meter = AverageValueMeter()
Ejemplo n.º 23
0
    def __init__(self, args):
        """GAN trainer: builds generator/discriminator, optimizers and meters.

        Args:
            args: parsed options (epoch, batch_size, lrG/lrD, fine_tune,
                com_loss, visdom env, update cadences, ...).
        """
        self.epoch = args.epoch
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.gpu_mode = args.gpu_mode
        self.device = args.device
        self.lrG = args.lrG
        self.lrD = args.lrD
        self.com_loss = args.com_loss
        self.fine_tune = args.fine_tune
        self.visual = args.visual
        self.env = args.env
        # update cadences: train D every d_every, G every g_every iterations
        self.d_every = args.d_every
        self.g_every = args.g_every

        # fine-tuning resumes from a generator checkpoint; the matching
        # discriminator path is derived by name substitution
        if self.fine_tune:
            self.model_G = args.model
            self.model_D = args.model.replace('netG', 'netD')

        # network init
        # with the compositional loss the discriminator sees 4 input
        # channels, otherwise 2
        self.G = NetG()
        if self.com_loss:
            self.D = NLayerDiscriminator(input_nc=4)
        else:
            self.D = NLayerDiscriminator(input_nc=2)

        print(self.G)
        print(self.D)

        if self.fine_tune:
            self.G.load_state_dict(t.load(self.model_G))
            self.D.load_state_dict(t.load(self.model_D))

        self.G_optimizer = t.optim.Adam(self.G.parameters(), lr=self.lrG)
        self.D_optimizer = t.optim.Adam(self.D.parameters(), lr=self.lrD)

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.G_criterion = t.nn.SmoothL1Loss().cuda()
            self.D_criterion = t.nn.MSELoss().cuda()

        # running averages for the individual loss terms
        self.G_error_meter = AverageValueMeter()
        self.Alpha_loss_meter = AverageValueMeter()
        self.Com_loss_meter = AverageValueMeter()
        self.Adv_loss_meter = AverageValueMeter()
        self.D_error_meter = AverageValueMeter()
Ejemplo n.º 24
0
 def init_meters(self):
     """(Re)create the confusion matrices and per-loss average meters."""
     self.rpn_cm = ConfusionMeter(2)  # foreground and background
     self.roi_name_cm = ConfusionMeter(
         N_NAMES + 1)  # num of name classes (including background)
     self.roi_color_cm = ConfusionMeter(
         N_COLORS + 1)  # num of color classes (including background)
     self.meters = {k: AverageValueMeter()
                    for k in LossTuple._fields}  # average loss
Ejemplo n.º 25
0
    def __init__(self, args):
        """GAN trainer: builds generator/discriminator, optimizers and meters.

        Args:
            args: parsed options (epoch, batch_size, lrG/lrD, com_loss,
                fine_tune, visdom env, update cadences, device, ...).
        """
        self.epoch = args.epoch
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.gpu_mode = args.gpu_mode
        self.device = args.device
        self.lrG = args.lrG  #Learning Rate, Generator
        self.lrD = args.lrD  #Learning Rate Discriminator
        self.com_loss = args.com_loss  #Compositional Loss, if it does not exist, it signifies that the image is one dimensional
        self.fine_tune = args.fine_tune
        self.visual = args.visual
        self.env = args.env
        # update cadences: train D every d_every, G every g_every iterations
        self.d_every = args.d_every
        self.g_every = args.g_every

        # fine-tuning resumes from a generator checkpoint; the matching
        # discriminator path is derived by name substitution
        if self.fine_tune:
            self.model_G = args.model
            self.model_D = args.model.replace('netG', 'netD')

        # network init
        self.G = NetG()
        if self.com_loss:
            self.D = NLayerDiscriminator(input_nc=4)
        else:
            self.D = NLayerDiscriminator(input_nc=2)

        print(self.G)
        print(self.D)

        if self.fine_tune:
            self.G.load_state_dict(t.load(self.model_G))
            self.D.load_state_dict(t.load(self.model_D))

        self.G_optimizer = t.optim.Adam(self.G.parameters(), lr=self.lrG)
        self.D_optimizer = t.optim.Adam(self.D.parameters(), lr=self.lrD)
        if self.gpu_mode:
            self.G.to(self.device)
            self.D.to(self.device)
            self.G_criterion = t.nn.SmoothL1Loss().to(self.device)
            self.D_criterion = t.nn.MSELoss().to(self.device)

        self.G_error_meter = AverageValueMeter()  #Generator Loss
        self.Alpha_loss_meter = AverageValueMeter()  #Alpha Loss
        self.Com_loss_meter = AverageValueMeter()  #Compositional Loss
        self.Adv_loss_meter = AverageValueMeter()  #Adversial Loss
        self.D_error_meter = AverageValueMeter()  #Discriminator Loss
Ejemplo n.º 26
0
def pretrain(dataloader, network, path=None):
    """Pretrain *network* with pixel-wise cross-entropy on the full masks.

    Args:
        dataloader: yields (img, mask, weak_mask, _) batches.
        network: segmentation network; moved to the module-level ``device``.
        path: optional checkpoint path overriding the default.

    Fix: the inner batch loop previously reused the outer epoch loop
    variable ``i``, shadowing it; the loops now use distinct names (the
    batch index, which was never used, is dropped entirely).
    """
    class config:
        lr = 1e-3
        epochs = 100
        path = '../checkpoint/pretrained_net.pth'

    pretrain_config = config()
    if path:
        pretrain_config.path = path
    network.to(device)
    criterion_ = CrossEntropyLoss2d()
    optimiser_ = torch.optim.Adam(network.parameters(), pretrain_config.lr)
    loss_meter = AverageValueMeter()
    for epoch in range(pretrain_config.epochs):
        loss_meter.reset()

        for img, mask, weak_mask, _ in tqdm(dataloader):
            img, mask = img.to(device), mask.to(device)
            optimiser_.zero_grad()
            output = network(img)
            # masks carry a channel dim; the 2D criterion wants (N, H, W)
            loss = criterion_(output, mask.squeeze(1))
            loss.backward()
            optimiser_.step()
            loss_meter.add(loss.item())

        # report the epoch's mean loss and checkpoint every epoch
        print(loss_meter.value()[0])
        torch.save(network.state_dict(), pretrain_config.path)
        print('pretrained model saved.')
Ejemplo n.º 27
0
def val(dataloader, net):
    """Evaluate *net* on *dataloader*; return (mean accuracy, ROC AUC).

    Side effects: switches the network to eval mode and back to train
    mode, and sets numpy print precision (kept from the original, where
    it supported the commented-out confusion-matrix report).

    Fix: ``np.float`` was removed in NumPy 1.24+; the division now uses
    the builtin ``float``.
    """
    avg_acc = AverageValueMeter()
    avg_acc.reset()
    y_true = []
    y_predict = []
    y_predict_proba = []
    net.eval()
    with t.no_grad():
        for i, (data, target) in enumerate(dataloader):
            data = data.type(t.FloatTensor)
            data = data.cuda()
            target = target.cuda()
            output = net(data)
            # predicted class = argmax over logits
            decision = output.max(1)[1]
            y_predict.extend(decision.cpu().numpy().tolist())
            # probability of the positive class (index 1) for the AUC
            proba = F.softmax(output, dim=1)[:, 1]
            y_predict_proba.extend(proba.cpu().numpy().tolist())
            y_true.extend(target.cpu().numpy().tolist())
            # per-batch accuracy (np.float is gone from modern NumPy)
            acc = (decision == target).sum().item() / float(len(target))
            avg_acc.add(acc)
    avg_auc = roc_auc_score(y_true, y_predict_proba)

    cnf_matrix = confusion_matrix(y_true, y_predict)
    np.set_printoptions(precision=2)
    # print(avg_auc)
    net.train()
    return avg_acc.value()[0], avg_auc
Ejemplo n.º 28
0
def train():
    """Train the EEG seizure-prediction convnet and plot progress to visdom.

    Uses hard-coded visdom server/env and data paths, and relies on
    module-level helpers: ``get_dataloader``, ``convNet``, ``val``,
    ``Visualizer`` and ``AverageValueMeter``.
    """
    vis = Visualizer(server='http://turing.livia.etsmtl.ca', env='EEG')
    data_root = '/home/AN96120/python_project/Seizure Prediction/processed_data/fft_meanlog_std_lowcut0.1highcut180nfreq_bands12win_length_sec60stride_sec60/Dog_1'
    dataloader_train = get_dataloader(data_root, training=True)
    dataloader_test = get_dataloader(data_root, training=False)
    # No interaction has been found in the training and testing dataset.
    # class weights = inverse class frequency, to counter class imbalance
    weights = t.Tensor([1/(np.array(dataloader_train.dataset.targets)==0).mean(),1/(np.array(dataloader_train.dataset.targets)==1).mean()  ])
    criterion = nn.CrossEntropyLoss(weight=weights.cuda())

    net = convNet ()
    net.cuda()

    optimiser = t.optim.Adam(net.parameters(),lr= 1e-4,weight_decay=1e-4)
    loss_avg = AverageValueMeter()
    epochs = 10000
    for epoch in range(epochs):
        loss_avg.reset()
        for ii, (data, targets) in enumerate(dataloader_train):
            data, targets= data.type(t.FloatTensor), targets.type(t.LongTensor)
            data = data.cuda()
            targets = targets.cuda()
            optimiser.zero_grad()
            output = net(data)
            loss = criterion(output,targets)
            loss_avg.add(loss.item())
            loss.backward()
            optimiser.step()
        vis.plot('loss',loss_avg.value()[0])

        # AUC on both splits each epoch (accuracy return value ignored)
        _,auc_train=val(dataloader_train,net)
        _, auc_test =val(dataloader_test,net)
        print(auc_train,auc_test)
Ejemplo n.º 29
0
 def __init__(self, head_detector):
     """Trainer for the head detector (RPN-only training pipeline)."""
     super(Head_Detector_Trainer, self).__init__()
     self.head_detector = head_detector
     self.rpn_sigma = opt.rpn_sigma  # sigma for the RPN localisation loss
     self.anchor_target_creator = AnchorTargetCreator()
     self.optimizer = self.head_detector.get_optimizer()
     #self.vis = Visualizer(env=opt.env)
     self.rpn_cm = ConfusionMeter(2)  # binary: foreground/background
     self.meters = {k: AverageValueMeter() for k in LossTuple._fields}  # average loss
Ejemplo n.º 30
0
def val(val_dataloader, network):
    """Evaluate *network* on *val_dataloader* and return the mean dice value.

    Side effects: switches the network to eval mode and prints the result.
    (The printed label says "iou" but the value comes from ``dice_loss``.)
    """
    network.eval()
    meter = AverageValueMeter()
    meter.reset()
    for image, mask, _, _ in val_dataloader:
        image, mask = image.to(device), mask.to(device)
        logits = network(image)
        prediction = F.softmax(logits, dim=1).max(1)[1]
        meter.add(dice_loss(prediction, mask).item())
    mean_dice = meter.value()[0]
    print('val iou:  %.6f' % mean_dice)
    return mean_dice
Ejemplo n.º 31
0
def train(**kwargs):
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    device=t.device('cuda') if opt.gpu else t.device('cpu')
    if opt.vis:
        from visualize import Visualizer
        vis = Visualizer(opt.env)

    # 数据
    transforms = tv.transforms.Compose([
        tv.transforms.Resize(opt.image_size),
        tv.transforms.CenterCrop(opt.image_size),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = tv.datasets.ImageFolder(opt.data_path, transform=transforms)
    dataloader = t.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=opt.num_workers,
                                         drop_last=True
                                         )

    # 网络
    netg, netd = NetG(opt), NetD(opt)
    map_location = lambda storage, loc: storage
    if opt.netd_path:
        netd.load_state_dict(t.load(opt.netd_path, map_location=map_location))
    if opt.netg_path:
        netg.load_state_dict(t.load(opt.netg_path, map_location=map_location))
    netd.to(device)
    netg.to(device)


    # 定义优化器和损失
    optimizer_g = t.optim.Adam(netg.parameters(), opt.lr1, betas=(opt.beta1, 0.999))
    optimizer_d = t.optim.Adam(netd.parameters(), opt.lr2, betas=(opt.beta1, 0.999))
    criterion = t.nn.BCELoss().to(device)

    # 真图片label为1,假图片label为0
    # noises为生成网络的输入
    true_labels = t.ones(opt.batch_size).to(device)
    fake_labels = t.zeros(opt.batch_size).to(device)
    fix_noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)
    noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)

    errord_meter = AverageValueMeter()
    errorg_meter = AverageValueMeter()


    epochs = range(opt.max_epoch)
    for epoch in iter(epochs):
        for ii, (img, _) in tqdm.tqdm(enumerate(dataloader)):
            real_img = img.to(device)

            if ii % opt.d_every == 0:
                # 训练判别器
                optimizer_d.zero_grad()
                ## 尽可能的把真图片判别为正确
                output = netd(real_img)
                error_d_real = criterion(output, true_labels)
                error_d_real.backward()

                ## 尽可能把假图片判别为错误
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises).detach()  # 根据噪声生成假图
                output = netd(fake_img)
                error_d_fake = criterion(output, fake_labels)
                error_d_fake.backward()
                optimizer_d.step()

                error_d = error_d_fake + error_d_real

                errord_meter.add(error_d.item())

            if ii % opt.g_every == 0:
                # 训练生成器
                optimizer_g.zero_grad()
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises)
                output = netd(fake_img)
                error_g = criterion(output, true_labels)
                error_g.backward()
                optimizer_g.step()
                errorg_meter.add(error_g.item())

            if opt.vis and ii % opt.plot_every == opt.plot_every - 1:
                ## 可视化
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()
                fix_fake_imgs = netg(fix_noises)
                vis.images(fix_fake_imgs.detach().cpu().numpy()[:64] * 0.5 + 0.5, win='fixfake')
                vis.images(real_img.data.cpu().numpy()[:64] * 0.5 + 0.5, win='real')
                vis.plot('errord', errord_meter.value()[0])
                vis.plot('errorg', errorg_meter.value()[0])

        if (epoch+1) % opt.save_every == 0:
            # 保存模型、图片
            tv.utils.save_image(fix_fake_imgs.data[:64], '%s/%s.png' % (opt.save_path, epoch), normalize=True,
                                range=(-1, 1))
            t.save(netd.state_dict(), 'checkpoints/netd_%s.pth' % epoch)
            t.save(netg.state_dict(), 'checkpoints/netg_%s.pth' % epoch)
            errord_meter.reset()
            errorg_meter.reset()