Example #1
    def validate(self):
        print('Validating')
        self.model.eval()
        # Leave LSTM in train mode

        with torch.no_grad():
            ious = []
            for step, data in enumerate(tqdm(self.val_loader)):
                if len(data['orig_poly']) == 1:
                    continue
                if self.opts['get_point_annotation']:
                    img = data['img'].to(device)
                    annotation = data['annotation_prior'].to(device).unsqueeze(
                        1)
                    img = torch.cat([img, annotation], 1)
                else:
                    img = data['img'].to(device)
                output = self.model.forward(img, data['fwd_poly'])
                pred_cps = output['pred_polys'][-1]
                pred_polys = self.spline.sample_point(pred_cps)
                pred_polys = pred_polys.data.cpu().numpy()
                # print(pred_polys.shape)
                # Get IoU
                iou = 0
                orig_poly = data['orig_poly']
                for i in range(pred_polys.shape[0]):
                    curr_pred_poly = utils.poly01_to_poly0g(
                        pred_polys[i], self.model.grid_size)
                    curr_gt_poly = utils.poly01_to_poly0g(
                        orig_poly[i], self.model.grid_size)
                    cur_iou, masks = metrics.iou_from_poly(
                        np.array(curr_pred_poly, dtype=np.int32),
                        np.array(curr_gt_poly, dtype=np.int32),
                        self.model.grid_size, self.model.grid_size)
                    iou += cur_iou
                iou = iou / pred_polys.shape[0]
                ious.append(iou)
                del (output)
                del (pred_polys)
            iou = np.mean(ious)
            self.val_writer.add_scalar('iou', float(iou), self.global_step)
            print('[VAL] IoU: %f' % iou)
            masks = np.expand_dims(masks, -1).astype(np.uint8)  # Add a channel dimension
            masks = np.tile(masks, [1, 1, 1, 3])  # Make [2, H, W, 3]
            img = (data['img'].cpu().numpy()[-1, ...] * 255).astype(np.uint8)
            img = np.transpose(img, [1, 2, 0])  # Make [H, W, 3]
            #self.val_writer.add_image('pred_mask', masks[0], self.global_step)
            #self.val_writer.add_image('gt_mask', masks[1], self.global_step)
            #self.val_writer.add_image('image', img, self.global_step)
        self.model.train()
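
The loop above leans on metrics.iou_from_poly, which (judging by how its outputs are used) rasterizes the predicted and ground-truth polygons onto a grid_size x grid_size canvas and compares the two binary masks. Its actual implementation is not shown on this page; the snippet below is only a minimal stand-in that assumes skimage.draw.polygon for rasterization, so treat it as a sketch rather than the project's code.

import numpy as np
from skimage.draw import polygon as draw_polygon


def iou_from_poly_sketch(pred_poly, gt_poly, height, width):
    """Rasterize two (N, 2) integer polygons given as (x, y) vertices and
    return (iou, masks), mirroring the pair the validation loop consumes."""
    masks = np.zeros((2, height, width), dtype=np.uint8)
    for idx, poly in enumerate((pred_poly, gt_poly)):
        # skimage expects row (y) and column (x) coordinate arrays
        rr, cc = draw_polygon(poly[:, 1], poly[:, 0], shape=(height, width))
        masks[idx, rr, cc] = 1

    intersection = np.logical_and(masks[0], masks[1]).sum()
    union = np.logical_or(masks[0], masks[1]).sum()
    iou = intersection / union if union > 0 else 0.0
    return iou, masks
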
Example #2
    def validate(self):
        print('Validating')
        ggnn_grid_size = self.opts['ggnn_grid_size']
        self.model.ggnn.encoder.eval()
        self.model.temperature = 0
        self.model.mode = "test"
        # Leave LSTM in train mode

        with torch.no_grad():
            ious = []
            for step, data in enumerate(tqdm(self.val_loader)):

                output = self.model(data['img'].to(device),
                                    data['fwd_poly'].to(device))
                pred_polys = output['pred_polys'].data.numpy()

                # Get IoU
                iou = 0
                orig_poly = data['orig_poly']

                for i in range(pred_polys.shape[0]):

                    p = pred_polys[i]

                    mask_poly = utils.get_masked_poly(
                        p, self.model.ggnn.ggnn_grid_size)
                    mask_poly = utils.class_to_xy(
                        mask_poly, self.model.ggnn.ggnn_grid_size)

                    curr_gt_poly_112 = utils.poly01_to_poly0g(
                        orig_poly[i], ggnn_grid_size)

                    cur_iou, masks = metrics.iou_from_poly(
                        np.array(mask_poly, dtype=np.int32),
                        np.array(curr_gt_poly_112, dtype=np.int32),
                        ggnn_grid_size, ggnn_grid_size)

                    iou += cur_iou

                iou = iou / pred_polys.shape[0]
                ious.append(iou)

                del (output)
                del (pred_polys)

            iou = np.mean(ious)
            self.val_writer.add_scalar('iou', float(iou), self.global_step)

            print('[VAL] IoU: %f' % iou)

        self.model.temperature = self.opts['temperature']
        self.model.mode = "train_ggnn"
        self.model.ggnn.encoder.train()
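
Here utils.get_masked_poly and utils.class_to_xy turn the GGNN's per-vertex class predictions back into (x, y) grid coordinates before the IoU is computed. Their real implementations are not reproduced on this page; the helpers below are a hypothetical sketch of that step, assuming row-major flattening of the grid and an end-of-sequence class equal to grid_size * grid_size, which may differ from the project's conventions.

import numpy as np


def get_masked_poly_sketch(poly_classes, grid_size):
    # Drop padding / end-of-sequence entries, assumed here to be encoded
    # as the out-of-range class grid_size * grid_size.
    poly_classes = np.asarray(poly_classes)
    return poly_classes[poly_classes < grid_size * grid_size]


def class_to_xy_sketch(poly_classes, grid_size):
    # Map flat cell indices on a grid_size x grid_size grid back to (x, y),
    # assuming row-major flattening (index = y * grid_size + x).
    poly_classes = np.asarray(poly_classes)
    return np.stack([poly_classes % grid_size, poly_classes // grid_size],
                    axis=-1)
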
Example #3
    def prepare_component(self, instance, component):
        """
        Prepare a single component within an instance
        """
        get_poly = 'train' in self.mode or 'tool' in self.mode

        if self.opts['flip']:
            example_flip = random.random() >= 0.5
        else:
            example_flip = False

        if self.opts['random_start']:
            random_start = random.random() >= 0.5
        else:
            random_start = False

        lo, hi = self.opts['random_context']
        context_expansion = random.uniform(lo, hi)

        crop_info = self.extract_crop(component, instance, context_expansion)

        img = crop_info['img']

        if example_flip:
            img = np.fliplr(img)

        train_dict = {}
        if get_poly:
            poly = crop_info['poly']

            if example_flip:
                # Flip polygon
                poly[:,0] = 1. - poly[:,0]

            if random_start:
                poly = np.roll(poly, random.choice(range(poly.shape[0])), axis=0)

            orig_poly = poly.copy()

            # Convert from [0, 1] to [0, grid_side]
            poly = utils.poly01_to_poly0g(poly, self.opts['grid_side'])
            fwd_poly = poly

            if 'train' in self.mode:
                # Get masks
                vertex_mask = np.zeros((self.opts['grid_side'], self.opts['grid_side']), np.float32)
                edge_mask = np.zeros((self.opts['grid_side'], self.opts['grid_side']), np.float32)

                vertex_mask = utils.get_vertices_mask(poly, vertex_mask)
                edge_mask = utils.get_edge_mask(poly, edge_mask)

                # Don't append first_v to end if in tool mode
                fwd_poly = np.append(fwd_poly, [fwd_poly[0]], axis=0)

            bwd_poly = fwd_poly[::-1]

            if example_flip:
                fwd_poly, bwd_poly = bwd_poly, fwd_poly

            arr_fwd_poly = np.ones((self.opts['max_poly_len'], 2), np.float32) * -1
            arr_bwd_poly = np.ones((self.opts['max_poly_len'], 2), np.float32) * -1
            arr_mask = np.zeros(self.opts['max_poly_len'], np.int32)

            len_to_keep = min(len(fwd_poly), self.opts['max_poly_len'])

            arr_fwd_poly[:len_to_keep] = fwd_poly[:len_to_keep]
            arr_bwd_poly[:len_to_keep] = bwd_poly[:len_to_keep]
            arr_mask[:len_to_keep + 1] = 1
            # NumPy clips slices that run past the end of the array, so the +1
            # is safe even when len_to_keep == max_poly_len

            train_dict = {
                'fwd_poly': arr_fwd_poly,
                'bwd_poly': arr_bwd_poly,
                'mask': arr_mask,
                'orig_poly': orig_poly,
                'full_poly': fwd_poly,
            }

            if 'train' in self.mode:
                train_dict['vertex_mask'] = vertex_mask
                train_dict['edge_mask'] = edge_mask
                train_dict['label'] = instance['label']

        # for Torch, use CHW, instead of HWC
        img = img.transpose(2,0,1)

        return_dict = {
            'img': img,
            'img_path': instance['img_path'],
            'patch_w': crop_info['patch_w'],
            'starting_point': crop_info['starting_point']
        }

        return_dict.update(train_dict)

        return return_dict
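
utils.poly01_to_poly0g shows up in every example on this page: it moves a polygon from normalized [0, 1] coordinates onto an integer grid (grid_side, 28, 112, ...). A minimal sketch of that conversion, assuming plain floor-and-clip quantization rather than the project's exact rounding rules:

import numpy as np


def poly01_to_poly0g_sketch(poly01, grid_side):
    # Scale [0, 1] coordinates to the grid and clamp to valid cell indices.
    poly0g = np.floor(poly01 * grid_side).astype(np.int32)
    return np.clip(poly0g, 0, grid_side - 1)
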
Example #4
    def prepare_component(self, instance, component):
        """
        Prepare a single component within an instance
        """
        get_gt_poly = 'train' in self.mode or 'oracle' in self.mode
        max_num = self.opts['p_num']
        pnum = self.opts['p_num']
        cp_num = self.opts['cp_num']

        # create circle polygon data
        pointsnp = np.zeros(shape=(cp_num, 2), dtype=np.float32)
        for i in range(cp_num):
            theta = 1.0 * i / cp_num * 2 * np.pi
            x = np.cos(theta)
            y = -np.sin(theta)
            pointsnp[i, 0] = x
            pointsnp[i, 1] = y

        fwd_poly = (0.7 * pointsnp + 1) / 2

        arr_fwd_poly = np.ones((cp_num, 2), np.float32) * 0.
        arr_fwd_poly[:, :] = fwd_poly

        lo, hi = self.opts['random_context']
        context_expansion = random.uniform(lo, hi)

        crop_info = self.extract_crop(component, instance, context_expansion)

        img = crop_info['img']

        train_dict = {}
        if get_gt_poly:
            poly = crop_info['poly']

            orig_poly = poly.copy()

            gt_orig_poly = poly.copy()
            gt_orig_poly = utils.poly01_to_poly0g(gt_orig_poly, 28)
            # Get masks
            vertex_mask = np.zeros((28, 28), np.float32)
            edge_mask = np.zeros((28, 28), np.float32)
            vertex_mask = utils.get_vertices_mask(gt_orig_poly, vertex_mask)
            edge_mask = utils.get_edge_mask(gt_orig_poly, edge_mask)

            if self.opts['get_point_annotation']:
                gt_poly_224 = np.floor(orig_poly *
                                       self.opts['img_side']).astype(np.int32)
                if self.opts['ext_points']:
                    ex_0, ex_1, ex_2, ex_3 = utils.extreme_points(
                        gt_poly_224, pert=self.opts['ext_points_pert'])
                    nodes = [ex_0, ex_1, ex_2, ex_3]
                    point_annotation = utils.make_gt(nodes,
                                                     h=self.opts['img_side'],
                                                     w=self.opts['img_side'])
                    target_annotation = np.array([[0, 0]])
            gt_poly = self.uniformsample(poly, pnum)
            sampled_poly = self.uniformsample(poly, 70)
            arr_gt_poly = np.ones((pnum, 2), np.float32) * 0.
            arr_gt_poly[:, :] = gt_poly

            ff = utils.poly01_to_poly0g(arr_gt_poly, 36)
            #poly_mask = np.zeros((224, 224), np.float32)
            #poly_mask = utils.get_vertices_mask(ff, poly_mask)
            #poly_mask11 = np.ones((224, 224), np.float32) * 0.
            #poly_mask11[:, :] = poly_mask

            # Numpy doesn't throw an error if the last index is greater than size
            if self.opts['get_point_annotation']:
                train_dict = {
                    'target_annotation': target_annotation,
                    'sampled_poly': sampled_poly,
                    'orig_poly': orig_poly,
                    'gt_poly': arr_gt_poly,
                    'annotation_prior': point_annotation
                }
            else:
                train_dict = {
                    'sampled_poly': sampled_poly,
                    'orig_poly': orig_poly,
                    'gt_poly': arr_gt_poly
                }

            boundary_dict = {
                'vertex_mask': vertex_mask,
                'gt_orig_poly': gt_orig_poly,
                'poly_mask': ff,
                'edge_mask': edge_mask
            }
            train_dict.update(boundary_dict)
            if 'train' in self.mode:
                train_dict['label'] = instance['label']

        # for Torch, use CHW, instead of HWC
        img = img.transpose(2, 0, 1)
        # blank_image
        return_dict = {
            'img': img,
            'fwd_poly': arr_fwd_poly,
            'img_path': instance['img_path'],
            'patch_w': crop_info['patch_w'],
            'starting_point': crop_info['starting_point'],
            'context_expansion': context_expansion
        }

        return_dict.update(train_dict)

        return return_dict
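
This variant of prepare_component seeds fwd_poly, the polygon that is fed to the model in the examples above, with cp_num control points evenly spaced on a circle of radius 0.7 inside the unit square. A vectorized equivalent of that loop, for reference (the 0.7 radius is simply the constant used in the example):

import numpy as np


def initial_circle(cp_num, radius=0.7):
    # cp_num evenly spaced angles around the circle
    theta = 2.0 * np.pi * np.arange(cp_num, dtype=np.float32) / cp_num
    # (cos, -sin) matches the sign convention in the loop above
    points = np.stack([np.cos(theta), -np.sin(theta)], axis=1)
    # scale by the radius and map from [-1, 1] into [0, 1]
    return (radius * points + 1.0) / 2.0
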
Example #5
    def prepare_component(self, instance, component):
        """
        Prepare a single component within an instance
        """
        get_gt_poly = 'train' in self.mode or 'oracle' in self.mode
        max_num = self.opts['p_num']
        pnum = self.opts['p_num']
        cp_num = self.opts['cp_num']

        # create circle polygon data
        pointsnp = np.zeros(shape=(cp_num, 2), dtype=np.float32)
        for i in range(cp_num):
            theta = 1.0 * i / cp_num * 2 * np.pi
            x = np.cos(theta)
            y = -np.sin(theta)
            pointsnp[i, 0] = x
            pointsnp[i, 1] = y

        fwd_poly = (0.7 * pointsnp + 1) / 2


        arr_fwd_poly = np.ones((cp_num, 2), np.float32) * 0.
        arr_fwd_poly[:, :] = fwd_poly

        lo, hi = self.opts['random_context']
        context_expansion = random.uniform(lo, hi)

        crop_info = self.extract_crop(component, instance, context_expansion)

        img = crop_info['img']

        ## get the onehot labels and dist_maps for boundary loss
        train_dict = {}

        if get_gt_poly:
            poly = crop_info['poly']

            orig_poly = poly.copy()

            gt_orig_poly = poly.copy()
            gt_orig_poly = utils.poly01_to_poly0g(gt_orig_poly, 28)
            # Get masks
            vertex_mask = np.zeros((28, 28), np.float32)
            edge_mask = np.zeros((28, 28), np.float32)
            vertex_mask = utils.get_vertices_mask(gt_orig_poly, vertex_mask)
            edge_mask = utils.get_edge_mask(gt_orig_poly, edge_mask)


            if 'train' in self.mode:
                mask = np.asarray(crop_info['mask'])  # wh
                mask_tensor = torch.from_numpy(mask)
                onehot_label = utils.class2one_hot(mask_tensor, 2)[0]
                mask_distmap = utils.one_hot2dist(onehot_label)


            if self.opts['get_point_annotation']:
                gt_poly_224 = np.floor(orig_poly * self.opts['img_side']).astype(np.int32)
                if self.opts['ext_points']:
                    ex_0, ex_1, ex_2, ex_3 = utils.extreme_points(gt_poly_224, pert=self.opts['ext_points_pert'])
                    nodes = [ex_0, ex_1, ex_2, ex_3]
                    point_annotation = utils.make_gt(nodes, h=self.opts['img_side'], w=self.opts['img_side'])
                    target_annotation = np.array([[0, 0]])
            gt_poly = self.uniformsample(poly, pnum)
            sampled_poly = self.uniformsample(poly, 70)
            #sampled_interactive = self.uniformsample(poly, 40)
            # subsample every 32nd of the pnum (1280) gt points -> 40 interactive points
            sampled_interactive = np.array(gt_poly[::32])
            #sampled_interactive = self.uniformsample(gt_poly, 40)
            # gt_poly = self.uniformsample(sampled_interactive, pnum)
            arr_gt_poly = np.ones((pnum, 2), np.float32) * 0.
            arr_gt_poly[:, :] = gt_poly

            # Numpy doesn't throw an error if the last index is greater than size
            if self.opts['get_point_annotation']:
                train_dict = {
                    'target_annotation': target_annotation,
                    'sampled_poly': sampled_poly,
                    'orig_poly': orig_poly,
                    'gt_poly': arr_gt_poly,
                    'annotation_prior':point_annotation,
                    'sampled_interactive': sampled_interactive,
                }
            else:
                train_dict = {
                    'sampled_poly': sampled_poly,
                    'orig_poly': orig_poly,
                    'gt_poly': arr_gt_poly,
                    'sampled_interactive': sampled_interactive,
                }
            if 'train' in self.mode:
                train_dict['onehot_label'] = np.array(onehot_label)
                train_dict['mask_distmap'] = np.array(mask_distmap)  # cwh

            boundary_dict = {
                'vertex_mask': vertex_mask,
                'edge_mask': edge_mask
            }
            train_dict.update(boundary_dict)
            if 'train' in self.mode:
                train_dict['label'] = instance['label']

        # for Torch, use CHW, instead of HWC
        img = img.transpose(2, 0, 1)
        # blank_image
        return_dict = {
            'img': img,
            'fwd_poly': arr_fwd_poly,
            'img_path': instance['img_path'],
            'patch_w': crop_info['patch_w'],
            'starting_point': crop_info['starting_point'],
            'context_expansion': context_expansion
        }

        return_dict.update(train_dict)

        return return_dict
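
In train mode this variant also builds an onehot_label / mask_distmap pair for a boundary loss via utils.class2one_hot and utils.one_hot2dist. Those helpers are not shown on this page; the sketch below illustrates the usual pattern (one-hot encode the crop mask, then compute a per-class signed distance map with SciPy). It ignores the batch dimension the real class2one_hot apparently handles, given the [0] indexing above, so it is an approximation rather than the project's implementation.

import numpy as np
import torch
from scipy.ndimage import distance_transform_edt


def class2one_hot_sketch(mask, num_classes):
    # (H, W) integer label mask -> (num_classes, H, W) float one-hot tensor
    return torch.nn.functional.one_hot(
        mask.long(), num_classes).permute(2, 0, 1).float()


def one_hot2dist_sketch(onehot):
    # Per-class signed distance map: negative inside the region, positive outside.
    onehot = onehot.numpy().astype(bool)
    dist = np.zeros(onehot.shape, dtype=np.float32)
    for c in range(onehot.shape[0]):
        fg = onehot[c]
        if fg.any():
            dist[c] = distance_transform_edt(~fg) - distance_transform_edt(fg)
    return dist
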
Example #6
    def prepare_instance(self, idx):
        """
        Prepare a single instance, can be both multicomponent
        or just a single component
        """
        instance = self.instances[idx]

        n_component = len(instance['components'])
        n_sample_point = int(self.opts['p_num'] / n_component)
        n_additional_point = self.opts['p_num'] - n_component * n_sample_point

        if self.opts['skip_multicomponent']:
            # Skip_multicomponent is true even during test because we use only
            # 1 bbox and no polys
            assert len(instance['components']) == 1, \
                'Found multicomponent instance with skip_multicomponent set to True!'

            component = instance['components'][0]
            results = self.prepare_component(instance, component)

            results['gt_img'] = np.zeros(
                (self.opts['diff_iou_dim'], self.opts['diff_iou_dim']),
                dtype=np.int32)
            results['vertex_mask'] = np.zeros((28, 28), np.float32)
            results['edge_mask'] = np.zeros((28, 28), np.float32)

            all_comp_gt_poly_list = []
            all_comp_gt_poly = []
            comp = self.extract_crop(component, instance,
                                     results['context_expansion'])
            all_comp_gt_poly_list.append(comp['poly'])
            all_comp_gt_poly.extend(comp['poly'].tolist())

            all_comp_gt_poly_img_side = np.array(
                all_comp_gt_poly) * self.opts['img_side']
            ex_0, ex_1, ex_2, ex_3 = utils.extreme_points(
                all_comp_gt_poly_img_side)
            nodes = [ex_0, ex_1, ex_2, ex_3]
            point_annotation = utils.make_gt(nodes,
                                             h=self.opts['img_side'],
                                             w=self.opts['img_side'])
            results['annotation_prior'] = point_annotation

            for gt_poly in all_comp_gt_poly_list:
                gt_orig_poly = gt_poly.copy()

                gt_poly = np.array(gt_poly * self.opts['diff_iou_dim'],
                                   dtype=np.int32)
                img_mask = utils.masks_from_poly(gt_poly,
                                                 self.opts['diff_iou_dim'],
                                                 self.opts['diff_iou_dim'])
                results['gt_img'] = results['gt_img'] + img_mask.astype(
                    np.int32)

                gt_orig_poly = utils.poly01_to_poly0g(gt_orig_poly, 28)

                results['vertex_mask'] = utils.get_vertices_mask(
                    gt_orig_poly, results['vertex_mask'])
                results['edge_mask'] = utils.get_edge_mask(
                    gt_orig_poly, results['edge_mask'])
            results['gt_img'][results['gt_img'] > 0] = 255

            if 'test' in self.mode:
                results['instance'] = instance

        else:
            if 'test' in self.mode:
                component = instance['components'][0]
                results = self.prepare_component(instance, component)

                if self.opts['ext_points']:

                    all_comp_gt_poly = []
                    for component in instance['components']:
                        if component['area'] < self.opts['min_area']:
                            continue
                        else:
                            comp = self.extract_crop(
                                component, instance,
                                results['context_expansion'])
                            all_comp_gt_poly.extend(comp['poly'].tolist())

                    all_comp_gt_poly = np.array(
                        all_comp_gt_poly) * self.opts['img_side']

                    ex_0, ex_1, ex_2, ex_3 = utils.extreme_points(
                        all_comp_gt_poly)
                    nodes = [ex_0, ex_1, ex_2, ex_3]
                    point_annotation = utils.make_gt(nodes,
                                                     h=self.opts['img_side'],
                                                     w=self.opts['img_side'])
                    results['annotation_prior'] = point_annotation

            else:

                component = random.choice(instance['components'])
                results = self.prepare_component(instance, component)
                results['gt_img'] = np.zeros(
                    (self.opts['diff_iou_dim'], self.opts['diff_iou_dim']),
                    dtype=np.int32)
                results['gt_edge_img'] = np.zeros(
                    (self.opts['diff_iou_dim'], self.opts['diff_iou_dim']),
                    dtype=np.int32)

                results['vertex_mask'] = np.zeros((28, 28), np.float32)
                results['edge_mask'] = np.zeros((28, 28), np.float32)

                all_comp_gt_poly_list = []
                all_comp_gt_poly = []
                all_comp_sample_poly_list = []

                for component in instance['components']:
                    if component['area'] < self.opts['min_area']:
                        continue
                    else:
                        comp = self.extract_crop(component, instance,
                                                 results['context_expansion'])
                        all_comp_gt_poly_list.append(comp['poly'])
                        all_comp_gt_poly.extend(comp['poly'].tolist())

                        sampled_poly = self.uniformsample(
                            comp['poly'], n_sample_point)
                        all_comp_sample_poly_list.append(sampled_poly)

                all_comp_sample_poly_list.append(
                    sampled_poly[:n_additional_point])

                all_comp_sample_poly_array = np.vstack(
                    all_comp_sample_poly_list)

                all_comp_gt_poly_img_side = np.array(
                    all_comp_gt_poly) * self.opts['img_side']
                ex_0, ex_1, ex_2, ex_3 = utils.extreme_points(
                    all_comp_gt_poly_img_side)
                nodes = [ex_0, ex_1, ex_2, ex_3]
                point_annotation = utils.make_gt(nodes,
                                                 h=self.opts['img_side'],
                                                 w=self.opts['img_side'])
                results['annotation_prior'] = point_annotation

                for gt_poly in all_comp_gt_poly_list:
                    gt_orig_poly = gt_poly.copy()

                    gt_poly = np.array(gt_poly * self.opts['diff_iou_dim'],
                                       dtype=np.int32)
                    img_mask = utils.masks_from_poly(gt_poly,
                                                     self.opts['diff_iou_dim'],
                                                     self.opts['diff_iou_dim'])
                    results['gt_img'] = results['gt_img'] + img_mask.astype(
                        np.int32)

                    results['gt_edge_img'] = utils.get_edge_mask(
                        gt_poly, results['gt_edge_img'])

                    gt_orig_poly = utils.poly01_to_poly0g(gt_orig_poly, 28)

                    results['vertex_mask'] = utils.get_vertices_mask(
                        gt_orig_poly, results['vertex_mask'])
                    results['edge_mask'] = utils.get_edge_mask(
                        gt_orig_poly, results['edge_mask'])

                results['gt_edge_img'][results['gt_edge_img'] > 0] = 255
                results['gt_img'][results['gt_img'] > 0] = 255

                results['gt_poly_for_chamfer'] = all_comp_sample_poly_array

            results['instance'] = instance

        return results
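
The annotation_prior assembled here comes from the four extreme points of the ground-truth polygon (left-, right-, top- and bottom-most vertices), which utils.make_gt then renders as an img_side x img_side heatmap. A rough sketch of what utils.extreme_points is assumed to do, including the optional perturbation used in the prepare_component examples:

import numpy as np


def extreme_points_sketch(poly, pert=0):
    # poly: (N, 2) array of (x, y) vertices in pixel coordinates.
    # Returns the left-, right-, top- and bottom-most vertices, each jittered
    # by up to `pert` pixels (a stand-in for opts['ext_points_pert']).
    def jitter(pt):
        if pert:
            return pt + np.random.randint(-pert, pert + 1, size=2)
        return pt

    left = jitter(poly[np.argmin(poly[:, 0])])
    right = jitter(poly[np.argmax(poly[:, 0])])
    top = jitter(poly[np.argmin(poly[:, 1])])
    bottom = jitter(poly[np.argmax(poly[:, 1])])
    return left, right, top, bottom
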
Example #7
    def train(self, epoch):
        print('Starting training')
        self.model.temperature = self.opts['temperature']

        self.model.ggnn.encoder.train()

        accum = defaultdict(float)
        # To accumulate stats for printing
        ggnn_grid_size = self.opts['ggnn_grid_size']

        for step, data in enumerate(self.train_loader):

            self.optimizer.zero_grad()

            if self.global_step % self.opts['val_freq'] == 0:
                self.validate()
                self.save_checkpoint(epoch)

            output = self.model(data['img'].to(device),
                                data['fwd_poly'].to(device),
                                orig_poly=data['orig_poly'])

            ggnn_logits = output['ggnn_logits']
            local_prediction = output['ggnn_local_prediction'].to(device)
            poly_masks = output['ggnn_mask'].to(device)
            pred_polys = output['pred_polys'].data.numpy()

            loss_sum = losses.poly_vertex_loss_mle_ggnn(
                local_prediction, poly_masks, ggnn_logits)

            loss_sum.backward()

            if 'grad_clip' in self.opts.keys():  # "grad_clip": 40
                nn.utils.clip_grad_norm_(self.model.ggnn.parameters(),
                                         self.opts['grad_clip'])

            self.optimizer.step()

            with torch.no_grad():
                # Get IoU
                iou = 0
                orig_poly = data['orig_poly']

                for i in range(pred_polys.shape[0]):
                    p = pred_polys[i]

                    mask_poly = utils.get_masked_poly(
                        p,
                        self.model.ggnn.ggnn_grid_size)  #"ggnn_grid_size": 112
                    mask_poly = utils.class_to_xy(
                        mask_poly, self.model.ggnn.ggnn_grid_size)

                    curr_gt_poly_112 = utils.poly01_to_poly0g(
                        orig_poly[i], ggnn_grid_size)

                    cur_iou, masks = metrics.iou_from_poly(
                        np.array(mask_poly, dtype=np.int32),
                        np.array(curr_gt_poly_112, dtype=np.int32),
                        ggnn_grid_size, ggnn_grid_size)

                    iou += cur_iou
                iou = iou / pred_polys.shape[0]
                accum['loss'] += float(loss_sum.item())
                accum['iou'] += iou
                accum['length'] += 1
                if step % self.opts['print_freq'] == 0:  #"print_freq": 20
                    # Mean of accumulated values
                    for k in accum.keys():
                        if k == 'length':
                            continue
                        accum[k] /= accum['length']

                    # Add summaries
                    masks = np.expand_dims(masks, -1).astype(
                        np.uint8)  # Add a channel dimension
                    masks = np.tile(masks, [1, 1, 1, 3])  # Make [2, H, W, 3]
                    img = (data['img'].cpu().numpy()[-1, ...] * 255).astype(
                        np.uint8)
                    img = np.transpose(
                        img, [1, 2, 0])  # Make [H, W, 3] by swapping dimensions

                    self.writer.add_image('pred_mask', masks[0],
                                          self.global_step)
                    self.writer.add_image('gt_mask', masks[1],
                                          self.global_step)
                    self.writer.add_image('image', img, self.global_step)

                    for k in accum.keys():
                        if k == 'length':
                            continue
                        self.writer.add_scalar(k, accum[k], self.global_step)

                    print("[%s] Epoch: %d, Step: %d, Polygon Loss: %f, IOU: %f" %
                          (str(datetime.now()), epoch, self.global_step,
                           accum['loss'], accum['iou']))

                    accum = defaultdict(float)

            del (output, local_prediction, poly_masks, masks, ggnn_logits,
                 pred_polys, loss_sum)
            self.global_step += 1
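
The accum defaultdict in train() is a small running-average logger: scalar metrics are summed every step, averaged by accum['length'] whenever step % print_freq == 0, written to TensorBoard, and then reset. The same pattern, factored into a helper purely for illustration (the class and names below are hypothetical, not part of the project):

from collections import defaultdict


class RunningStats:
    """Accumulate scalar metrics and report their means periodically."""

    def __init__(self):
        self.accum = defaultdict(float)

    def update(self, **metrics):
        for name, value in metrics.items():
            self.accum[name] += float(value)
        self.accum['length'] += 1

    def means(self):
        n = max(self.accum['length'], 1.0)
        return {k: v / n for k, v in self.accum.items() if k != 'length'}

    def reset(self):
        self.accum.clear()


# Hypothetical usage inside a training loop:
#     stats.update(loss=loss_sum.item(), iou=iou)
#     if step % print_freq == 0:
#         for name, value in stats.means().items():
#             writer.add_scalar(name, value, global_step)
#         stats.reset()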