Example #1
import os

import cv2
from tensorboardX import SummaryWriter

# `config` (project paths) and `ms` (metric/box helpers providing
# `_extract_meta`) are project-local modules assumed to be importable here.


class LoggerTBX:
    def __init__(self, log_dir=None):
        if log_dir is not None:
            log_dir = os.path.join(config.PATHS.LOGDIR, log_dir)
        self.writer = SummaryWriter(log_dir=log_dir)
        self.first_group_keys = ['loss', 'reg_loss', 'clf_loss']
        self.second_group_keys = ['map_all', 'map_pathology']

    def __call__(self, train_metr, valid_metr, datas, i=None):
        self.write_scalars(train_metr, valid_metr, i)
        self.write_images(datas, i)

    def write_scalars(self, train_metr, valid_metr, i=None):
        self.writer.add_scalars(
            'loss_group',
            self._group_meters(train_metr, valid_metr, self.first_group_keys),
            i)
        self.writer.add_scalars(
            'metric_group',
            self._group_meters(train_metr, valid_metr, self.second_group_keys),
            i)

    def write_images(self, datas, i=None):
        for data in datas:
            annotation, bboxes, scores = ms._extract_meta(data)
            filename = data['pid']
            # cv2 reads images as HWC/BGR arrays, hence dataformats='HWC'
            image = cv2.imread(os.path.join(config.PATHS.IMAGES, filename))
            # ground-truth boxes are static, so they are always logged at step 0
            self.writer.add_image_with_boxes('{}/annot'.format(data['pid']),
                                             image,
                                             annotation[:, :4],
                                             0,
                                             dataformats='HWC')
            self.writer.add_image_with_boxes('{}/preds'.format(data['pid']),
                                             image,
                                             bboxes[:, :4],
                                             i,
                                             dataformats='HWC')

    def _group_meters(self, train_metr, valid_metr, keys):
        group = {k + '_train': train_metr[k] for k in keys}
        group.update({k + '_valid': valid_metr[k] for k in keys})
        return group
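
A minimal usage sketch for the logger above (not part of the original example): it assumes `train_metr`/`valid_metr` are dict-like and keyed by the grouped metric names, and passes an empty `datas` list so no images are read; the run name and values are placeholders.

logger = LoggerTBX(log_dir='experiment_1')  # placeholder run name
train_metr = {'loss': 0.50, 'reg_loss': 0.30, 'clf_loss': 0.20,
              'map_all': 0.41, 'map_pathology': 0.38}
valid_metr = {'loss': 0.60, 'reg_loss': 0.35, 'clf_loss': 0.25,
              'map_all': 0.39, 'map_pathology': 0.36}
logger(train_metr, valid_metr, datas=[], i=0)  # empty `datas` skips image logging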
Example #2
# Assumed setup, mirroring the tensorboardX demo this fragment is taken from;
# the concrete values below are illustrative.
import numpy as np
import torch
import torchvision.models as models
import torchvision.utils as vutils
from tensorboardX import SummaryWriter

writer = SummaryWriter()
resnet18 = models.resnet18(False)
sample_rate = 44100
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
skip_audio = False  # set True to skip the audio-logging branch

for n_iter in range(100):
    s1 = torch.rand(1)  # value to keep
    s2 = torch.rand(1)  # unused in this fragment
    # data grouping by `slash`
    writer.add_scalar('data/scalar_systemtime', s1[0], n_iter, summary_description="# markdown is supported!")
    # data grouping by `slash`
    writer.add_scalar('data/scalar_customtime', s1[0], n_iter, walltime=n_iter, display_name="dudubird")
    writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter),
                                             "xcosx": n_iter * np.cos(n_iter),
                                             "arctanx": np.arctan(n_iter)}, n_iter)
    x = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        x = vutils.make_grid(x, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)  # Tensor
        writer.add_image_with_boxes('imagebox_label', torch.ones(3, 240, 240) * 0.5,
                                    torch.Tensor([[10, 10, 100, 100], [101, 101, 200, 200]]),
                                    n_iter,
                                    labels=['abcde' + str(n_iter), 'fgh' + str(n_iter)])
        if not skip_audio:
            x = torch.zeros(sample_rate * 2)
            for i in range(x.size(0)):
                # sound amplitude should be in [-1, 1]
                x[i] = np.cos(freqs[n_iter // 10] * np.pi *
                              float(i) / float(sample_rate))
            writer.add_audio('myAudio', x, n_iter)
        writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
        writer.add_text('markdown Text', '''a|b\n-|-\nc|d''', n_iter)
        for name, param in resnet18.named_parameters():
            if 'bn' not in name:
                writer.add_histogram(name, param, n_iter)
        writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(
            100), n_iter)  # needs tensorboard 0.4RC or later
Example #3
def train(opt):
    # Assumed context for this snippet: os, numpy as np, torch, datetime,
    # tqdm, torchvision.transforms and tensorboardX.SummaryWriter are imported,
    # along with the project-local AllInOneData, Normalizer, Resizer, collater,
    # EfficientDetMultiBackbone, MTLoss, BBoxTransform, ClipBoxes and postprocess.
    date = datetime.now().date()
    logs = '../logs/'
    logdir = os.path.join(logs, str(date))
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    else:
        # avoid clobbering an existing run directory for the same date
        logdir = logdir + "_" + str(np.random.randint(0, 1000))
        os.mkdir(logdir)
    
    train_data = AllInOneData(opt.train_path, set='train',
                              transforms=transforms.Compose([Normalizer(), Resizer()]))
    train_generator = torch.utils.data.DataLoader(train_data, batch_size=opt.batch_size,
                                                  shuffle=True, num_workers=8,
                                                  collate_fn=collater, drop_last=True)

    valid_data = AllInOneData(opt.train_path, set='validation',
                              transforms=transforms.Compose([Normalizer(), Resizer()]))
    valid_generator = torch.utils.data.DataLoader(valid_data, batch_size=opt.batch_size,
                                                  shuffle=False, num_workers=8,
                                                  collate_fn=collater, drop_last=True)
    
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    model = EfficientDetMultiBackbone(opt.train_path,compound_coef=0,heads=opt.heads)
    model.to(device)

    min_val_loss = 10e5
    
    if opt.optim == 'Adam':
        # note: the 'Adam' option actually instantiates AdamW (decoupled weight decay)
        optimizer = torch.optim.AdamW(model.parameters(), lr=opt.lr)
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=opt.lr,
                                    momentum=opt.momentum, nesterov=True)

    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, opt.lr, total_steps=None, epochs=opt.epochs,
                                                    steps_per_epoch=len(train_generator), pct_start=0.1, anneal_strategy='cos',
                                                    cycle_momentum=True, base_momentum=0.85, max_momentum=0.95, 
                                                    div_factor=25.0, final_div_factor=1000.0, last_epoch=-1)

    criterion = MTLoss(heads = opt.heads, device = device)
    
    print('Model is successfully initialized')
    print(f'Targets are {opt.heads}.')
    verb_loss = 0
    writer = SummaryWriter(logdir=logdir,filename_suffix=f'Train_{"_".join(opt.heads)}',comment='try1')
    
    for epoch in range(opt.epochs):
        model.train()
        Losses = {k:[] for k in opt.heads}
        description = f'Epoch:{epoch}| Total Loss:{verb_loss}'
        progress_bar = tqdm(train_generator,desc = description)
        Total_loss = []
        for sample in progress_bar:
                        
            imgs = sample['img'].to(device)
            gt_person_bbox = sample['person_bbox'].to(device)
            gt_face_bbox = sample['face_bbox'].to(device)
            gt_pose = sample['pose'].to(device)
            gt_face_landmarks = sample['face_landmarks'].to(device)
            gt_age = sample['age'].to(device)
            gt_race = sample['race'].to(device)
            gt_gender = sample['gender'].to(device)
            gt_skin = sample['skin'].to(device)
            gt_emotions = sample['emotion'].to(device)        

            out = model(imgs)
            annot = {'person':gt_person_bbox,'gender':gt_gender,
                     'face':gt_face_bbox,'emotions':gt_emotions,
                     'face_landmarks':gt_face_landmarks,
                     'pose':gt_pose}
            
            losses, lm_mask = criterion(out,annot,out['anchors'])
            # total loss is the sum of all per-head losses
            loss = torch.sum(torch.cat(list(losses.values())))
            loss.backward()
            optimizer.step()
            scheduler.step() 

            verb_loss = loss.detach().cpu().numpy()
            Total_loss.append(verb_loss)
            description = f'Epoch:{epoch}| Total Loss:{verb_loss}|'
            for k,v in losses.items():
                Losses[k].append(v.detach().cpu().numpy())
                description+=f'{k}:{round(np.mean(Losses[k]),1)}|'
            progress_bar.set_description(description)
            optimizer.zero_grad()
        
        writer.add_scalar('Train/Total',round(np.mean(Total_loss),2),epoch)
        for k in Losses.keys():
            writer.add_scalar(f"Train/{k}",round(np.mean(Losses[k]),2),epoch)
        
        if epoch % opt.valid_step == 0:
            # de-normalize the last training image back to [0, 255] for logging
            im = (imgs[0] + 1) / 2 * 255
            
            regressBoxes = BBoxTransform()
            clipBoxes = ClipBoxes()
            pp = postprocess(imgs,
                  out['anchors'], out['person'], out['gender'],
                  regressBoxes, clipBoxes,
                  0.4, 0.4)
            
            writer.add_image_with_boxes('Train/Box_prediction',im,pp[0]['rois'],epoch)
            img2 = out['face_landmarks']
            if img2.shape[1]>3:
                img2 = img2.sum(axis=1).unsqueeze(1)*255
                lm_mask = lm_mask.sum(axis=1).unsqueeze(1)*255
            writer.add_images('Train/landmarks_prediction',img2,epoch)
            writer.add_images('Train/landmark target', lm_mask,epoch)
            
            #VALIDATION STEPS
            model.eval()
            with torch.no_grad():
                valid_Losses = {k:[] for k in opt.heads}

                val_description = f'Validation| Total Loss:{verb_loss}'
                progress_bar = tqdm(valid_generator,desc = val_description)
                Total_loss = []
                for sample in progress_bar:   
                    imgs = sample['img'].to(device)
                    gt_person_bbox = sample['person_bbox'].to(device)
                    gt_face_bbox = sample['face_bbox'].to(device)
                    gt_pose = sample['pose'].to(device)
                    gt_face_landmarks = sample['face_landmarks'].to(device)
                    gt_age = sample['age'].to(device)
                    gt_race = sample['race'].to(device)
                    gt_gender = sample['gender'].to(device)
                    gt_skin = sample['skin'].to(device)
                    gt_emotions = sample['emotion'].to(device)
                    out = model(imgs)
                    annot = {'person':gt_person_bbox,'gender':gt_gender,
                     'face':gt_face_bbox,'emotions':gt_emotions,
                     'face_landmarks':gt_face_landmarks,
                     'pose':gt_pose}

                    losses, lm_mask = criterion(out,annot,out['anchors'])

                    loss = torch.sum(torch.cat(list(losses.values())))
                    verb_loss = loss.detach().cpu().numpy()
                    Total_loss.append(verb_loss)
                    val_description = f'Validation| Total Loss:{verb_loss}|'
                    for k,v in losses.items():
                        valid_Losses[k].append(v.detach().cpu().numpy())
                        val_description+=f'{k}:{round(np.mean(valid_Losses[k]),1)}|'
                    progress_bar.set_description(val_description)

                writer.add_scalar('Validation/Total',round(np.mean(Total_loss),2),epoch)
                for k in valid_Losses.keys():
                    writer.add_scalar(f"Validation/{k}",round(np.mean(valid_Losses[k]),2),epoch)

                im = (imgs[0]+1)/2*255
                
                regressBoxes = BBoxTransform()
                clipBoxes = ClipBoxes()
                pp = postprocess(imgs,
                  out['anchors'], out['person'], out['gender'],
                  regressBoxes, clipBoxes,
                  0.4, 0.4)

                writer.add_image_with_boxes('Validation/Box_prediction',im,pp[0]['rois'],epoch)
                
                img2 = out['face_landmarks']
                if img2.shape[1]>3:
                    img2 = img2.sum(axis=1).unsqueeze(1)*255
                    lm_mask = lm_mask.sum(axis=1).unsqueeze(1)*255
                writer.add_images('Validation/landmarks_prediction',img2,epoch)
                writer.add_images('Validation/landmark target', lm_mask,epoch)

                if verb_loss < min_val_loss:
                    # note: this compares the last validation batch's loss,
                    # not the epoch mean accumulated in Total_loss
                    print("The model improved; saving checkpoint.")
                    torch.save(model.state_dict(),
                               f'{logdir}/{opt.save_name.split(".pt")[0]}_best_epoch_{epoch}.pt')
                    min_val_loss = verb_loss
                

        if epoch%100==0:
            torch.save(model.state_dict(),f'{logdir}/{opt.save_name.split(".pt")[0]}_epoch_{epoch}.pt')
    torch.save(model.state_dict(),f'{logdir}/{opt.save_name.split(".pt")[0]}_last.pt')
    writer.close()
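
A hypothetical driver for `train` above: the attribute names match what the function reads from `opt`, while the values and paths are placeholders.

from types import SimpleNamespace

opt = SimpleNamespace(
    train_path='path/to/dataset',   # placeholder dataset root
    heads=['person', 'face', 'pose', 'face_landmarks', 'gender', 'emotions'],
    batch_size=8,
    optim='Adam',
    lr=1e-3,
    momentum=0.9,
    epochs=100,
    valid_step=5,
    save_name='effdet_multitask.pt',
)
train(opt)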
Example #4
        # (fragment: continuation of the logging loop above; `writer`,
        # `dummy_audio`, `sample_rate` and `n_iter` are defined earlier)
        writer.add_audio('myAudio',
                         dummy_audio,
                         n_iter,
                         sample_rate=sample_rate)

        writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)

        # for name, param in resnet18.named_parameters():
        #     writer.add_histogram(name, param.clone().cpu().data.numpy(), n_iter)

        # needs tensorboard 0.4RC or later
        writer.add_pr_curve('xoxo', np.random.randint(2, size=100),
                            np.random.rand(100), n_iter)

root = os.getcwd()
data_root = os.path.join(root, 'DATA')
print(data_root)
dataset = datasets.MNIST(root=data_root, train=False, download=False)
# `test_data`/`test_labels` are deprecated in torchvision; `data`/`targets`
# expose the same tensors.
images = dataset.data[:100].float()
label = dataset.targets[:100]
print(images.size())
print(label.size())
features = images.view(100, 784)  # flatten 28x28 images for the embedding projector
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))
writer.add_image_with_boxes('CoCo', torch.rand(3, 128, 128),
                            torch.tensor([[10, 10, 50, 50]]))
# export scalar data to JSON for external processing
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
Example #5
# (fragment: body of the demo logging loop; `writer`, `n_iter`, `resnet18`,
# `sample_rate` and `freqs` are defined as in Example #2 above)
writer.add_scalars(
    'data/scalar_group', {
        "xsinx": n_iter * np.sin(n_iter),
        "xcosx": n_iter * np.cos(n_iter),
        "arctanx": np.arctan(n_iter)
    }, n_iter)
x = torch.rand(32, 3, 64, 64)  # output from network
if n_iter % 10 == 0:
    x = vutils.make_grid(x, normalize=True, scale_each=True)
    writer.add_image('Image', x, n_iter)  # Tensor
    # writer.add_image_with_boxes('imagebox_label', torch.ones(3, 240, 240) * 0.5,
    #      torch.Tensor([[10, 10, 100, 100], [101, 101, 200, 200]]),
    #      n_iter,
    #      labels=['abcde' + str(n_iter), 'fgh' + str(n_iter)])
    writer.add_image_with_boxes(
        'imagebox', x, torch.Tensor([[10, 10, 40, 40], [40, 40, 60, 60]]),
        n_iter)
    x = torch.zeros(sample_rate * 2)
    for i in range(x.size(0)):
        # sound amplitude should be in [-1, 1]
        x[i] = np.cos(freqs[n_iter // 10] * np.pi * float(i) /
                      float(sample_rate))
    writer.add_audio('myAudio', x, n_iter)
    writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
    writer.add_text('markdown Text', '''a|b\n-|-\nc|d''', n_iter)
    for name, param in resnet18.named_parameters():
        if 'bn' not in name:
            writer.add_histogram(name, param, n_iter)
    writer.add_pr_curve('xoxo', np.random.randint(2, size=100),
                        np.random.rand(100),
                        n_iter)  # needs tensorboard 0.4RC or later
Example #6
# Assumed imports: tensorboardX supplies the writer; chainer is used below to
# move cupy arrays to the CPU before histogramming.
import chainer
from tensorboardX import SummaryWriter


class Summarizer(object):
    """Thin wrapper around SummaryWriter whose methods no-op unless `report` is True."""
    def __init__(self):
        self.report = False
        self.global_step = None
        self.writer = None

    def initialize_writer(self, log_dir):
        self.writer = SummaryWriter(log_dir)

    def add_scalar(self, tag, scalar_value, global_step=None, walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_scalar(tag,
                               scalar_value,
                               global_step=global_step,
                               walltime=walltime)

    def add_scalars(self,
                    main_tag,
                    tag_scalar_dict,
                    global_step=None,
                    walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_scalars(main_tag,
                                tag_scalar_dict,
                                global_step=global_step,
                                walltime=walltime)

    def add_histogram(self,
                      tag,
                      values,
                      global_step=None,
                      bins='tensorflow',
                      walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        if isinstance(values, chainer.cuda.cupy.ndarray):
            values = chainer.cuda.to_cpu(values)

        self.writer.add_histogram(tag,
                                  values,
                                  global_step=global_step,
                                  bins=bins,
                                  walltime=walltime)

    def add_image(self, tag, img_tensor, global_step=None, walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_image(tag,
                              img_tensor,
                              global_step=global_step,
                              walltime=walltime)

    def add_image_with_boxes(self,
                             tag,
                             img_tensor,
                             box_tensor,
                             global_step=None,
                             walltime=None,
                             **kwargs):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_image_with_boxes(tag,
                                         img_tensor,
                                         box_tensor,
                                         global_step=global_step,
                                         walltime=walltime,
                                         **kwargs)

    def add_figure(self,
                   tag,
                   figure,
                   global_step=None,
                   close=True,
                   walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_figure(tag,
                               figure,
                               global_step=global_step,
                               close=close,
                               walltime=walltime)

    def add_video(self,
                  tag,
                  vid_tensor,
                  global_step=None,
                  fps=4,
                  walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_video(tag,
                              vid_tensor,
                              global_step=global_step,
                              fps=fps,
                              walltime=walltime)

    def add_audio(self,
                  tag,
                  snd_tensor,
                  global_step=None,
                  sample_rate=44100,
                  walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_audio(tag,
                              snd_tensor,
                              global_step=global_step,
                              sample_rate=sample_rate,
                              walltime=walltime)

    def add_text(self, tag, text_string, global_step=None, walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_text(tag,
                             text_string,
                             global_step=global_step,
                             walltime=walltime)

    def add_graph_onnx(self, prototxt):
        if not self.report:
            return

        self.writer.add_graph_onnx(prototxt)

    def add_graph(self, model, input_to_model=None, verbose=False, **kwargs):
        if not self.report:
            return

        self.writer.add_graph(model,
                              input_to_model=input_to_model,
                              verbose=verbose,
                              **kwargs)

    def add_embedding(self,
                      mat,
                      metadata=None,
                      label_img=None,
                      global_step=None,
                      tag='default',
                      metadata_header=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_embedding(mat,
                                  metadata=metadata,
                                  label_img=label_img,
                                  global_step=global_step,
                                  tag=tag,
                                  metadata_header=metadata_header)

    def add_pr_curve(self,
                     tag,
                     labels,
                     predictions,
                     global_step=None,
                     num_thresholds=127,
                     weights=None,
                     walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_pr_curve(tag,
                                 labels,
                                 predictions,
                                 global_step=global_step,
                                 num_thresholds=num_thresholds,
                                 weights=weights,
                                 walltime=walltime)

    def add_pr_curve_raw(self,
                         tag,
                         true_positive_counts,
                         false_positive_counts,
                         true_negative_counts,
                         false_negative_counts,
                         precision,
                         recall,
                         global_step=None,
                         num_thresholds=127,
                         weights=None,
                         walltime=None):
        if not self.report:
            return

        if global_step is None and self.global_step is not None:
            global_step = self.global_step

        self.writer.add_pr_curve_raw(tag,
                                     true_positive_counts,
                                     false_positive_counts,
                                     true_negative_counts,
                                     false_negative_counts,
                                     precision,
                                     recall,
                                     global_step=global_step,
                                     num_thresholds=num_thresholds,
                                     weights=weights,
                                     walltime=walltime)

    def add_custom_scalars_multilinechart(self,
                                          tags,
                                          category='default',
                                          title='untitled'):
        if not self.report:
            return
        self.writer.add_custom_scalars_multilinechart(tags,
                                                      category=category,
                                                      title=title)

    def add_custom_scalars_marginchart(self,
                                       tags,
                                       category='default',
                                       title='untitled'):
        if not self.report:
            return
        self.writer.add_custom_scalars_marginchart(tags,
                                                   category=category,
                                                   title=title)

    def add_custom_scalars(self, layout):
        if not self.report:
            return
        self.writer.add_custom_scalars(layout)
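
A minimal usage sketch for the Summarizer wrapper above: nothing is written until `report` is True and a writer has been initialized; the log directory and tags are placeholders.

summarizer = Summarizer()
summarizer.initialize_writer('runs/example')   # placeholder log directory
summarizer.report = True        # without this every add_* call is a no-op
summarizer.global_step = 0      # used whenever a call omits global_step
summarizer.add_scalar('train/loss', 0.42)
summarizer.add_text('notes', 'first run')
summarizer.writer.close()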