Example No. 1
    def forward(self, rois, feature_maps):
        """
        rois: (batch, n_rois, [y1, x1, y2, x2]). Proposal boxes in normalized coordinates.
        feature_maps: [p2, p3, p4, p5]. Each is (batch, channels, h, w). Note that h and w differ among feature maps.

        return:
            logits: (batch, n_rois, n_classes) classifier logits (before softmax)
            probs: (batch, n_rois, n_classes) classifier probabilities
            bbox_deltas: (batch, n_rois, n_classes, [dy, dx, log(dh), log(dw)]) Deltas to apply to proposal boxes.
        """
        # ROI Pooling. (batch, num_rois, channels, pool_size, pool_size)
        x = self.pyramid_roi_align.process(rois, feature_maps)
        # self.vfm['fpn_classifier_roi_align'] = x

        # TODO: Make sure that batch_slice is equal to TimeDistributed
        # Share weights among dim "num_rois".
        x = Utils.time_distributed(x, self.conv1)
        x = Utils.time_distributed(x, self.conv2)
        # (batch, num_rois, fc_layers_size, 1, 1) to (batch, num_rois, fc_layers_size)
        shared = torch.squeeze(torch.squeeze(x, dim=4), dim=3)

        # Classifier head
        mrcnn_class_logits = Utils.time_distributed(shared, self.dense_logits)
        mrcnn_probs = Utils.time_distributed(mrcnn_class_logits, nn.Softmax(dim=-1))

        # BBox head
        mrcnn_bbox = Utils.time_distributed(shared, self.dense_bbox)
        # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))] to
        # [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
        shape = mrcnn_bbox.shape[:2] + (self.num_classes, 4)
        mrcnn_bbox = torch.reshape(mrcnn_bbox, shape)

        return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
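
The heads above rely on Utils.time_distributed to share weights across the "num_rois" dimension. The helper itself is not shown in these examples; a minimal sketch of what it plausibly does, assuming it mirrors Keras's TimeDistributed by folding the batch and roi dimensions together:

def time_distributed(x, module):
    # Hypothetical sketch: merge (batch, num_rois) into one dimension,
    # apply the module, then restore the original leading dimensions.
    batch, n = x.shape[:2]
    merged = x.reshape((batch * n,) + tuple(x.shape[2:]))
    out = module(merged)
    return out.reshape((batch, n) + tuple(out.shape[1:]))
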
Example No. 2
def get_max_jaccard_per_target_fragment(graph, predicted_fragments):
    max_jaccard_per_target = dict()

    nodes_labels = nx.get_node_attributes(graph, 'label')
    data_nodes, head_nodes = set(), set()
    for node, label in nodes_labels.items():
        if label == 'Header':
            head_nodes.add(node)
        else:
            data_nodes.add(node)

    # a fragment containing both header and data nodes is a candidate table
    candidate_tables, other_fragments = list(), list()
    for frag in predicted_fragments:
        if (frag & head_nodes) and (frag & data_nodes):
            candidate_tables.append(frag)
        else:
            other_fragments.append(frag)

    tables_node_areas = Utils.group_node_area_tuples_by_tableId(graph)
    table_ids = set(tables_node_areas.keys())
    if len(candidate_tables) > 0:
        for frag in candidate_tables:
            jaccard_per_fragment = Utils.evaluate_identified_table(
                graph, graph.subgraph(frag))
            for tbl_id, jaccard in jaccard_per_fragment.items():
                if tbl_id == -1:  # -1 is a special id reserved for regions outside the annotated tables
                    continue
                if (tbl_id not in max_jaccard_per_target
                        or max_jaccard_per_target[tbl_id] < jaccard):
                    max_jaccard_per_target[tbl_id] = jaccard
    else:
        for tbl_id in table_ids:
            max_jaccard_per_target[tbl_id] = 0.0

    if (-1 in table_ids) and other_fragments:
        for frag in other_fragments:
            jaccard_per_fragment = Utils.evaluate_identified_table(
                graph, graph.subgraph(frag))
            if -1 in jaccard_per_fragment:
                if (-1 not in max_jaccard_per_target
                        or max_jaccard_per_target[-1] < jaccard_per_fragment[-1]):
                    max_jaccard_per_target[-1] = jaccard_per_fragment[-1]

    # when there are tables that do not overlap with any of the predicted candidate_tables
    remaining = table_ids - set(max_jaccard_per_target.keys())
    for tbl_id in remaining:
        max_jaccard_per_target[tbl_id] = 0.0
    return max_jaccard_per_target
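
Utils.evaluate_identified_table is not shown here; judging by how its output is consumed, it likely scores a predicted fragment against each annotated table with the Jaccard index over node sets. A minimal sketch under that assumption:

def jaccard(fragment_nodes, table_nodes):
    # Hypothetical: intersection over union of the two node sets.
    union = len(fragment_nodes | table_nodes)
    return len(fragment_nodes & table_nodes) / union if union else 0.0
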
Example No. 3
def check_param(threadCount, requestUrl, methods, params):
    if threadCount <= 0:
        log.logger.error("Request count must be greater than 0")
        exit(0)
    if str(methods).lower() not in ('get', 'post'):
        log.logger.error("Invalid request method")
        exit(0)
    if util.check_url(requestUrl) is False:
        log.logger.error("Invalid request URL format")
        exit(0)
    if util.check_json(params) is False:
        log.logger.error("Invalid JSON format")
        exit(0)
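
A hypothetical call, assuming params is a JSON string and requestUrl a full URL:

check_param(threadCount=10,
            requestUrl='http://example.com/api',
            methods='post',
            params='{"key": "value"}')
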
Example No. 4
    def process(self, anchors, scores, deltas):
        """
        anchors: (batch, num_anchors, [y1, x1, y2, x2]) anchors in normalized coordinates
        scores: (batch, num_anchors, [bg prob, fg prob])
        deltas: (batch, num_anchors, [dy, dx, log(dh), log(dw)])
        """
        # (batch, num_anchors, [fg_prob])
        scores = scores[:, :, 1]
        # TODO: Bounding box refinement standard deviation on deltas?

        # Keep the top N (pre_nms_limit) anchors by score and get their indices.
        scores, ix = torch.topk(scores,
                                k=self.pre_nms_limit,
                                dim=-1,
                                sorted=True)
        deltas = Utils.batch_slice(
            [deltas, ix], lambda x, y: torch.index_select(x, dim=0, index=y))
        anchors = Utils.batch_slice(
            [anchors, ix], lambda x, y: torch.index_select(x, dim=0, index=y))
        self.vfm['rpn_scores'] = scores
        self.vfm['rpn_anchors'] = anchors

        # Apply deltas to anchors to get refined boxes. [batch, N, (y1, x1, y2, x2)]
        boxes = Utils.batch_slice([anchors, deltas],
                                  lambda x, y: Utils.refine_boxes(x, y))

        # Clip boxes
        window = torch.tensor([0, 0, 1, 1],
                              dtype=boxes.dtype).to(device=boxes.device)
        boxes = Utils.batch_slice(boxes, lambda x: Utils.clip_boxes(x, window))

        # nms
        proposals = Utils.batch_slice([boxes, scores], self.nms)
        return proposals
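
Utils.refine_boxes applies the predicted deltas to the anchors. Its implementation is not included here; a sketch of the standard refinement used in Faster/Mask R-CNN, assuming (N, [y1, x1, y2, x2]) boxes and (N, [dy, dx, log(dh), log(dw)]) deltas:

import torch

def refine_boxes(boxes, deltas):
    # Convert to center/size, shift and scale, convert back to corners.
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h + deltas[:, 0] * h
    cx = boxes[:, 1] + 0.5 * w + deltas[:, 1] * w
    h = h * torch.exp(deltas[:, 2])
    w = w * torch.exp(deltas[:, 3])
    return torch.stack([cy - 0.5 * h, cx - 0.5 * w,
                        cy + 0.5 * h, cx + 0.5 * w], dim=1)
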
Example No. 5
def lambda_handler(event, context):
    """
    This function inserts content into a MySQL RDS instance.
    """
    add_device_readings()
    # class MySQLModel(pw.Model):
    #     '''
    #     base model that will use our MySQL database
    #     '''
    #     class Meta:
    #         database = myDB

    # class User(MySQLModel):
    #     username = pw.CharField()
    #     # etc, etc


    # # when you're ready to start querying, remember to connect
    # try:
    #     myDB.connect()
    #     logger.info("Connected to db")
    # except Exception as e:
    #     logger.error("Unable to connect to the DB")
    # data_source = [
    # {'field1': 'val1-1', 'field2': 'val1-2'},
    # {'field1': 'val2-1', 'field2': 'val2-2'},
    # # ...
    # ]
    # SensorName	LastCommunicationDate	X_mms	Y_mms	Z_mms	X_hz	Y_hz	Z_hz
    # Sensor-300578	08-08-2017 08:14	20	20.4	16.7	14	12	17
    # Sensor-300578	08-08-2017 08:14	20	20.4	16.7	14	12	17
    # Sensor-300578	08-08-2017 08:14	20	20.4	16.7	14	12	17
    # Sensor-300577	08-08-2017 08:17	25	24.3	10.8	15	11	19

    # Sensor-300578	08-08-2017 08:14	20	20.4	16.7	14	12	17
    import datetime
    dt = '08-08-2017 08:14'
    datetime_object = datetime.datetime.strptime(dt, '%d-%m-%Y %H:%M')
    data_source = [
        {'device': Device.get_or_create(name='Sensor-300578')[0], 'device_display_time' : datetime_object},
        {'device': Device.get_or_create(name='Sensor-300577')[0], 'device_display_time' : datetime_object}
    ]
    # data_source = [
    #     {'device': Device.get_or_create(name='Sensor-300578') },
    #     {'device': Device.get_or_create(name='Sensor-300577')}
    # ]
    Utils().bulk_insert_device_readings(data_source)
    # person, created = Device.get_or_create(name='Sensor-300578')
    # print(person, created)
    # print(Device.get_or_create(name='Sensor-300578')[0])

    # foo = DeviceReadings.create(device = Device.get(name='Sensor-300578'))
    # print("device created")
    # print(foo)
    # user = User.create(username='******', password='******')
    return "Added %d items to RDS MySQL table"
Example No. 6
def get_box_ap_dict_graph(n_class_ids, gt_class_ids, gt_boxes, detection_boxes,
                          detection_classes, detection_scores):
    """
    Get the AP TP/FP list of the detection boxes in an image.

    n_class_ids: int. The number of classes.
    gt_class_ids: (max_instance_per_img)
    gt_boxes: (max_instance_per_img, [y1, x1, y2, x2])
    detection_boxes: (detection_max_instance, [y1, x1, y2, x2])
    detection_classes: (detection_max_instance, [class_id])
    detection_scores: (detection_max_instance, [score])

    return:
        class_ap_dict: {class_id: [confidence, judge]}
    """
    # Create ap dict. {class_id: [confidence, judge]}
    class_ap_dict = {}
    for i in range(n_class_ids):
        class_ap_dict[i + 1] = []

    for class_id_from_zero in range(n_class_ids):
        class_id = class_id_from_zero + 1
        gt_index = gt_class_ids.eq(class_id)
        gt_box = gt_boxes[gt_index]  # (n_gt_box, 4)

        detection_index = detection_classes.eq(class_id)
        confidence = detection_scores[detection_index]  # (n_detection_box)
        detection_box = detection_boxes[
            detection_index]  # (n_detection_box, 4)

        if gt_box.shape[0] == 0:
            tp_index = set()
        else:
            overlaps = Utils.compute_overlaps(
                detection_box, gt_box)  # (n_detection_box, n_gt_box)
            # 1. For every gt box, take the detection with the max IoU as a tp candidate.
            if overlaps.shape[0] > 1:
                tp_index1 = overlaps.argmax(dim=0)  # (n_gt_box)
            else:
                tp_index1 = torch.tensor([0], dtype=torch.int32)
            # 2. Get the index of the box which has IoU>0.5.
            tp_index2 = overlaps.gt(0.5).nonzero()[:, 0]
            # 3. Take intersection set.
            tp_index1 = tp_index1.cpu().numpy().tolist()
            tp_index2 = tp_index2.cpu().numpy().tolist()
            tp_index = set(tp_index1).intersection(set(tp_index2))

        # Append [confidence, judge] for specific class_id.
        for n in range(confidence.shape[0]):
            if n in tp_index:
                judge = 'tp'
            else:
                judge = 'fp'
            class_ap_dict[class_id].append([confidence[n].cpu().item(), judge])
    return class_ap_dict
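
The {class_id: [confidence, judge]} structure is what a standard AP computation consumes: sort entries by confidence, accumulate TP/FP counts, and integrate precision over recall. A sketch of that downstream step (non-interpolated AP, assuming n_gt is the number of ground-truth boxes of the class):

def average_precision(entries, n_gt):
    entries = sorted(entries, key=lambda e: e[0], reverse=True)
    tp = fp = 0
    ap, prev_recall = 0.0, 0.0
    for conf, judge in entries:
        if judge == 'tp':
            tp += 1
        else:
            fp += 1
        recall = tp / n_gt
        precision = tp / (tp + fp)
        ap += precision * (recall - prev_recall)
        prev_recall = recall
    return ap
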
Example No. 7
    def process(self, rois, mrcnn_class, mrcnn_bbox):
        """
        rois: (batch, n_rois, 4)
        mrcnn_class: (batch, n_rois, n_classes)
        mrcnn_bbox: (batch, n_rois, n_classes, 4)

        return: (batch, detection_max_instance, [y1, x1, y2, x2, class_id, score])
        """
        detections_batch = Utils.batch_slice([rois, mrcnn_class, mrcnn_bbox],
                                             self.refine_detections_graph)
        return detections_batch
Example No. 8
    def show_detections(self):
        for batch in range(self.batchsize):
            batch_denorm_boxes = Utils.denorm_boxes(self.detection_boxes[batch], self.image_shape)

            # gather masks
            batch_masks_raw = self.detection_masks[batch]  # (max_instances, n_classes, h_mask, w_mask)
            batch_class_ids = self.detection_class_ids[batch]  # (max_instances)
            # (max_instances, h_mask, w_mask)
            detection_masks = batch_masks_raw[np.arange(0, batch_masks_raw.shape[0]), batch_class_ids.astype(np.int64)]
            
            self.draw_boxes(batch, batch_denorm_boxes, batch_class_ids,
                            self.detection_scores[batch], detection_masks, self.detection_savepath)
Example No. 9
    def show_boxes(self, boxes, save_dir, scores=None):
        """
        boxes: (batch, n_boxes, 4)
        save_dir: str
        scores: (batch, n_boxes)
        """
        boxes = self.get_numpy_if_tensor(boxes)
        scores = self.get_numpy_if_tensor(scores)
        for batch in range(self.batchsize):
            denorm_boxes = Utils.denorm_boxes(boxes[batch], self.image_shape)
            score = self.get_slice_data(scores, batch)
            self.draw_boxes(batch, denorm_boxes, scores=score, save_dir=save_dir)
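
Utils.denorm_boxes converts normalized boxes back to pixel coordinates for drawing. A hypothetical sketch, assuming the usual (h - 1, w - 1) scaling:

import numpy as np

def denorm_boxes(boxes, image_shape):
    # Map normalized [y1, x1, y2, x2] to integer pixel coordinates.
    h, w = image_shape[:2]
    scale = np.array([h - 1, w - 1, h - 1, w - 1])
    return np.around(boxes * scale).astype(np.int32)
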
Example No. 10
    def show_boxes_match(self, boxes, match, match_score, save_dir):
        """
        boxes: (batch, n_boxes, 4)
        match: (batch, n_boxes)
        match_score: value in `match` that marks the boxes to draw
        save_dir: str
        """
        boxes = self.get_numpy_if_tensor(boxes)
        match = self.get_numpy_if_tensor(match)
        for batch in range(self.batchsize):
            ix = np.equal(match[batch], match_score).nonzero()
            boxes_match = boxes[batch][ix]

            denorm_boxes = Utils.denorm_boxes(boxes_match, self.image_shape)
            self.draw_boxes(batch, denorm_boxes, save_dir=save_dir)
Example No. 11
    def show_boxes_filt(self, boxes, score, threshold, save_dir):
        """
        boxes: (batch, n_boxes, 4)
        score: (batch, n_boxes)
        threshold: float. Only boxes with score > threshold are drawn.
        save_dir: str
        """
        boxes = self.get_numpy_if_tensor(boxes)
        score = self.get_numpy_if_tensor(score)
        for batch in range(self.batchsize):
            ix = score[batch] > threshold
            boxes_match = boxes[batch][ix]

            denorm_boxes = Utils.denorm_boxes(boxes_match, self.image_shape)
            self.draw_boxes(batch, denorm_boxes, save_dir=save_dir)
Example No. 12
    def visualize(self, save_dir):
        boxes = Utils.denorm_boxes(self.boxes, self.canvas.shape[-2:])

        for batch in range(boxes.shape[0]):
            image = self.canvas[batch, 0]
            for i in range(boxes.shape[1]):
                box = boxes[batch, i]
                class_id = int(round(self.class_ids[batch, i]))
                mask = self.mask[batch, i]

                save_path = os.path.join(save_dir, '{}_{}'.format(batch, i))
                Visualization.visualize_1_box(image,
                                              box,
                                              name=str(class_id),
                                              mask=mask,
                                              save_path=save_path)
Example No. 13
def lambda_handler(event, context):
    """
    This function reads a csv file from an S3 bucket and
    inserts its content into a MySQL RDS instance.

    ** This lambda is not suitable for files with many records.
    """
    # Sample CSV format
    # SensorName	LastCommunicationDate	X_mms	Y_mms	Z_mms	X_hz	Y_hz	Z_hz
    # Sensor-300578	08-08-2017 08:14	20	20.4	16.7	14	12	17
    # Sensor-300578	08-08-2017 08:14	20	20.4	16.7	14	12	17
    # Sensor-300578	08-08-2017 08:14	20	20.4	16.7	14	12	17
    # Sensor-300577	08-08-2017 08:17	25	24.3	10.8	15	11	19
    '''
    Execute the lines below to set up the tables and load mock data:
    logger.info("start setup tables")
    Utils().setup_tables()
    Utils().load_mock_data()
    logger.info("setup table and mock data completed")
    '''
    idx = 0
    try:
        lines = read_csv_s3(event)
        data_source = []
        logger.info("start forming device reading object")
        for line in lines:
            if idx > 0:
                d_reading_obj = construct_device_reading_data(line)
                if d_reading_obj:
                    data_source.append(d_reading_obj)
                    check_notify_alert(d_reading_obj)
            idx += 1
            # logger.info("IDX NO %s", idx)
        logger.info("Adding data to the db. IDX : %s", idx)
        logger.info(data_source)
        Utils().bulk_insert_device_readings(data_source)
        logger.info("bulk insert successful")
    except Exception as exp:
        logger.exception("Exception occurred %s", exp)

    return "Added %d items to RDS MySQL table" % (idx)
Example No. 14
    def forward(self, rois, feature_maps):
        """
        rois: (batch, n_rois, [y1, x1, y2, x2]). Proposal boxes in normalized coordinates.
        feature_maps: [p2, p3, p4, p5]. Each is (batch, channels, h, w). Note that h and w differ among feature maps.

        return: (batch, num_rois, n_classes, mask_pool_size*2, mask_pool_size*2)
        """
        # ROI Pooling. (batch, num_rois, channels, mask_pool_size, mask_pool_size)
        x = self.pyramid_roi_align.process(rois, feature_maps)

        # Share weights among the "num_rois" dim.
        x = Utils.time_distributed(x, self.conv1)
        x = Utils.time_distributed(x, self.conv2)
        x = Utils.time_distributed(x, self.conv3)
        x = Utils.time_distributed(x, self.conv4)
        x = Utils.time_distributed(x, self.deconv)
        x = Utils.time_distributed(x, self.conv1x1)
        return x
Example No. 15
    #
    # rua = ops.roi_align(images, box, output_size=[60,40])
    # rua = rua.squeeze(dim=1).cpu()
    # plt.imshow(rua[0, 0])
    # plt.show()

    # 2.
    # numpy_data = np.zeros([40000, 1], dtype=float)
    # cpu_data = torch.tensor(numpy_data)
    # gpu_data = cpu_data.cuda()
    #
    # print('gpu: ', torch.argmax(gpu_data, dim=1))
    # print('cpu: ', torch.argmax(cpu_data, dim=1))

    # 3.
    grad_saver = Utils.GradSaver()
    size = 10

    x = 0.1 * torch.ones([size, size], dtype=torch.float32,
                         requires_grad=True).cuda()
    y = torch.ones([size, size], dtype=torch.float32,
                   requires_grad=False).cuda()
    z = torch.nn.functional.binary_cross_entropy(x, y)

    # save_grad('x_grad') returns a hook (a function) that stores the gradient under that name
    x.register_hook(grad_saver.save_grad('x_grad'))
    z.register_hook(grad_saver.save_grad('z_grad'))
    z.backward()

    grad_saver.print_grad('x_grad')
    grad_saver.print_grad('z_grad')
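
Utils.GradSaver is consistent with a small registry of gradients keyed by name. A sketch matching the usage above (save_grad returns a hook closure; print_grad looks the tensor up):

class GradSaver:
    def __init__(self):
        self.grads = {}

    def save_grad(self, name):
        def hook(grad):
            # Called by autograd with the gradient of the hooked tensor.
            self.grads[name] = grad
        return hook

    def print_grad(self, name):
        print(name, self.grads.get(name))
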
Example No. 16
import torch
import torch.nn as nn
from model.FunctionLayers import PyramidROIAlign
from model import Utils
from torchvision.models.resnet import ResNet, Bottleneck


grads = Utils.GradSaver()


# ResNet
class ResBlock(nn.Module):
    """
    Construct Residual block used in the ResNet.
    """
    def __init__(self, in_channels, filters, stride=1, res_conv=False, train_bn=True):
        """
        in_channels: the channel number of input tensor
        filters: [n_filter1, n_filter2, n_filter3], the filter number of the three conv blocks
        stride: the stride of the first conv1x1 (including shortcut)
        res_conv: bool, whether conv1x1 is used in the shortcut
        """
        super().__init__()
        self.res_conv = res_conv

        self.conv1 = nn.Sequential(nn.Conv2d(in_channels, filters[0], kernel_size=1, stride=stride),
                                   nn.BatchNorm2d(filters[0], track_running_stats=train_bn),
                                   nn.ReLU())
        self.conv2 = nn.Sequential(nn.Conv2d(filters[0], filters[1], kernel_size=3, padding=1),
                                   nn.BatchNorm2d(filters[1], track_running_stats=train_bn),
                                   nn.ReLU())
Example No. 17
def main(test_dataset_path, train_dataset_path, metrics_calc_fun, weight_bounds, n_buckets=10, seed=1,
         n_opt_runs=10, n_searches=10, max_n_frgm=200, n_frgm_per_table=10, tf=None):

    # read json datasets
    print("\nUsing training dataset = %s" % train_dataset_path)
    print("Using test dataset = %s" % test_dataset_path)
    test_sheets = Reader.read_json_dataset(test_dataset_path)
    error_sheets = Reader.read_json_dataset(train_dataset_path)
    test_graphs = list()
    error_graphs = list()
    for i in range(len(error_sheets)):
        # create graphs
        er_multi_digraph = Builder.create_graph_for_sheet(error_sheets[i])
        ts_multi_digraph = Builder.create_graph_for_sheet(test_sheets[i])
        # remove in-view edges
        er_multi_digraph = Utils.remove_inview_edges(er_multi_digraph, is_strict=False)
        ts_multi_digraph = Utils.remove_inview_edges(ts_multi_digraph, is_strict=False)
        # store graph for later use
        error_graphs.append(er_multi_digraph)
        test_graphs.append(ts_multi_digraph)

    # distribute the graphs into buckets for the cross validation
    print("Distribute into %d buckets using seed=%d" % (n_buckets, seed))
    error_buckets = distribute_graphs_to_buckets_by_file(error_graphs, n_buckets, balance_multis=True, seed=seed)
    test_buckets = distribute_graphs_to_buckets_by_file(test_graphs, n_buckets, balance_multis=True, seed=seed)

    print("Execute %d times the genetic search" % n_searches)
    print("Perform %d weight optimization rounds" % n_opt_runs)
    # fit the transformer for scaling the metrics' values
    print("Using max_n_frag=%d, n_frag_per_table=%d" % (max_n_frgm, n_frgm_per_table))
    if not tf:
        tf_fit = None
        print("Working with unscaled data")
    else:
        tf_fit = fit_transformer(tf, error_graphs, metrics_calc_fun, max_n_frgm * n_opt_runs,
                                 (n_frgm_per_table * n_opt_runs) if n_frgm_per_table > 0 else -1)
        print("Scale the data using '%s'" % tf.__class__.__name__)

    # collect results
    print("Using the function '%s' to calculate fragmentation metrics" % str(metrics_calc_fun.__name__))

    scores_per_graph = list()
    target_scores, predicted_scores = list(), list()
    # for each fold train and test
    for j in range(len(error_buckets)):
        print("\n\nFold Nr.%d" % j)
        train_buckets = list(error_buckets[:j])
        train_buckets.extend(error_buckets[j + 1:])
        # the sheet graphs from the other buckets
        train_graphs = list()
        for bk in train_buckets:
            train_graphs.extend(bk.sheet_graphs)

        # identify the optimal weights based on examples from the training set.
        # for better accuracy, we perform the optimization several times
        # and take the weighted average of the results
        error_rates = list()
        weights = list()
        for _ in range(n_opt_runs):
            # create an optimization sample
            tables_metrics, part_metrics, tables_counts, graph_indices = \
                get_sample_for_optimization(train_graphs, metrics_calc_fun,
                                            max_n_frgm=max_n_frgm, n_frgm_per_table=n_frgm_per_table)

            tables_values, part_values = list(), list()
            for index in range(len(tables_metrics)):
                tables_values.append(list(tables_metrics[index].values()))
                part_values.append(list(part_metrics[index].values()))

            if tf_fit:
                t_values_scaled = tf_fit.transform(np.array(tables_values))
                p_values_scaled = tf_fit.transform(np.array(part_values))
            else:
                t_values_scaled = tables_values
                p_values_scaled = part_values

            optw = get_optimal_weights(t_values_scaled, p_values_scaled, weight_bounds,
                                       tbl_counts=None, method='slsqp_jac')
            weights.append(optw)
            error = calculate_error_rate(t_values_scaled, p_values_scaled, optw)
            error_rates.append(error)

        # calculate the weighted average of all optimization iterations
        # we use the error rate to weight the instances (i.e., the optimal weights)
        inverse_error_rates = [1 - er for er in error_rates]  # the smaller the error the better
        weights_t = np.transpose(weights)  # transpose to bring to the right shape for multiplication
        multi_ew = np.multiply(inverse_error_rates, weights_t)
        sum_ew = np.sum(multi_ew, axis=1)
        inverse_error_sum = sum(inverse_error_rates)
        avg_opt_weights = list(np.divide(sum_ew, inverse_error_sum))
        print("Optimal Weights: %s" % str(avg_opt_weights))
        print("Inverse Errors: %s" % str(inverse_error_rates))

        # the averaged weights for each metric
        metrics_weights = dict()
        # metrics_weights["count_empty_cols"] = avg_opt_weights[0]
        # metrics_weights["count_empty_rows"] = avg_opt_weights[1]
        metrics_weights["avg_adj_emt_width"] = avg_opt_weights[0]
        metrics_weights["avg_adj_emt_height"] = avg_opt_weights[1]
        metrics_weights["data_above_ratio"] = avg_opt_weights[2]
        metrics_weights["neg_head_align_ratio"] = avg_opt_weights[3]
        metrics_weights["neg_data_align_ratio"] = avg_opt_weights[4]
        metrics_weights["count_other_hvalid"] = avg_opt_weights[5]
        metrics_weights["all_in_one_column"] = avg_opt_weights[6]
        # metrics_weights["count_only_data"] = avg_opt_weights[7]
        # metrics_weights["count_only_head"] = avg_opt_weights[8]
        metrics_weights["overlap_ratio"] = avg_opt_weights[7]

        # identify the tables for each sheet graph in the test bucket
        for bk_gix in range(len(test_buckets[j].sheet_graphs)):
            bk_graph = test_buckets[j].sheet_graphs[bk_gix]

            # print("\nGraph={file='%s', sheet='%s'}" % (bk_graph.graph['file'], bk_graph.graph['sheet']))
            jaccards_per_table = dict()
            for _ in range(n_searches):
                # search for the best fragmentation; the search is either exhaustive or genetic, depending on the graph size
                predicted, p_score, true_tables, t_score = \
                    reduced_search.identify_tables(bk_graph, metrics_weights, metrics_calc_fun,
                                                   genetic_algorithm='varOr', scaler=tf_fit, use_seed=True,
                                                   multi_process=False, verbose=False, show_stats=False)
                predicted_scores.append(p_score)
                target_scores.append(t_score)

                # evaluate the predictions, by finding per true table the best match from the predicted fragments
                max_jaccards = eval.get_max_jaccard_per_target_fragment(bk_graph, predicted)
                for tid in max_jaccards.keys():
                    if tid not in jaccards_per_table:
                        jaccards_per_table[tid] = list()
                    jaccards_per_table[tid].append(max_jaccards[tid])

            # calculate the average jaccard per table.
            avg_jaccard_per_table = dict()
            for tid in jaccards_per_table.keys():
                avg_jaccard_per_table[tid] = sum(jaccards_per_table[tid]) / n_searches
            scores_per_graph.append(avg_jaccard_per_table)
            # print("Avg Jaccard per Table = %s" % str(avg_jaccard_per_table))

    # display the evaluation results
    ordered_sheet_graphs = list()
    for bk in test_buckets:
        ordered_sheet_graphs.extend(bk.sheet_graphs)

    dict_scores_per_graph = dict()
    for ix in range(len(ordered_sheet_graphs)):
        dict_scores_per_graph[ix] = scores_per_graph[ix]
    print("\n\n\nOverall Results")
    eval.display_evaluation_results(dict_scores_per_graph, ordered_sheet_graphs, print_threshold=0.9)
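
A minimal numeric check of the error-weighted averaging used above, assuming two optimization runs with error rates 0.2 and 0.4:

import numpy as np

error_rates = [0.2, 0.4]
weights = [[1.0, 2.0], [3.0, 4.0]]       # one weight vector per run
inv = [1 - er for er in error_rates]     # [0.8, 0.6]
avg = np.sum(np.multiply(inv, np.transpose(weights)), axis=1) / sum(inv)
print(avg)  # [1.857..., 2.857...]: the lower-error run counts more
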
Example No. 18
    def process(self, proposals):
        outputs = Utils.batch_slice(
            [proposals, self.gt_class_ids, self.gt_boxes, self.gt_masks],
            self.detection_targets_graph)
        return outputs
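
Utils.batch_slice appears throughout these examples but is not defined in them. A hypothetical sketch, assuming it applies a per-image function to each batch element and re-stacks the results:

import torch

def batch_slice(inputs, graph_fn):
    if not isinstance(inputs, list):
        inputs = [inputs]
    outputs = []
    for b in range(inputs[0].shape[0]):
        # Slice every input at batch index b and apply the function.
        outputs.append(graph_fn(*[x[b] for x in inputs]))
    if isinstance(outputs[0], (tuple, list)):
        # Functions like detection_targets_graph return several tensors.
        return tuple(torch.stack(o, dim=0) for o in zip(*outputs))
    return torch.stack(outputs, dim=0)
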
Example No. 19
    def detection_targets_graph(self, proposals, gt_class_ids, gt_boxes,
                                gt_masks):
        """
        Subsample proposals for one image (i.e. one batch) by splitting positive and negative proposals.

        proposals: (N, [y1, x1, y2, x2]). Proposals in normalized coordinates after ProposalLayer. Might be zero-padded
                   if there are not enough proposals.
        gt_class_ids: (all_GT_instances). Class IDs.
        gt_boxes: (all_GT_instances, [y1, x1, y2, x2]). Ground truth boxes in normalized coordinates.
        gt_masks: (all_GT_instances, height, width). Ground truth masks of boolean type.

        return:
            rois: (n, 4). With zero paddings.
            roi_gt_class_ids: (n). With zero paddings.
            deltas: (n, 4). With zero paddings.
            roi_gt_masks_minibox: (n, mask_h, mask_w)
        """
        # Remove zero padding
        proposals, _ = Utils.trim_zero_graph(proposals)
        gt_boxes, non_zeros_ix = Utils.trim_zero_graph(gt_boxes)
        gt_class_ids = torch.index_select(gt_class_ids,
                                          dim=0,
                                          index=non_zeros_ix)
        gt_masks = torch.index_select(gt_masks, dim=0, index=non_zeros_ix)

        # Compute overlaps.
        overlaps = Utils.compute_overlaps(
            proposals, gt_boxes)  # (n_proposals, n_gt_boxes)

        # Determine positive and negative ROIs.
        # To every proposal, get the max IoU with all the gt boxes.
        proposal_iou_max, _ = torch.max(overlaps, dim=1)
        # Positive rois are those with >= 0.5 IoU with a GT box.
        # Negative rois are those with < 0.5 IoU with every GT box.
        positive_roi_bool = torch.gt(
            proposal_iou_max,
            torch.tensor([self.positive_iou_threshold],
                         dtype=proposal_iou_max.dtype,
                         device=proposal_iou_max.device))
        positive_ix = torch.nonzero(positive_roi_bool)
        negative_ix = torch.nonzero(~positive_roi_bool)
        # print(positive_ix.shape, negative_ix.shape)

        # Subsample rois to make positive/all = proposal_positive_ratio
        # 1. Positive rois (proposal_positive_ratio * train_proposals_per_image, 4)
        positive_count = int(self.roi_positive_ratio *
                             self.train_rois_per_image)
        # TODO: Need shuffle on positive_ix and negative_ix before index selecting.
        positive_ix = positive_ix[0:positive_count].squeeze(1)
        # 2. Negative rois ((1/proposal_positive_ratio - 1) * positive_count, 4)
        # Computed from positive_count because there may not be enough positive rois.
        negative_count = int(
            (1 / self.roi_positive_ratio - 1) * positive_count)
        negative_ix = negative_ix[0:negative_count].squeeze(1)
        # 3. Gather selected rois
        positive_rois = torch.index_select(proposals, dim=0, index=positive_ix)
        negative_rois = torch.index_select(proposals, dim=0, index=negative_ix)

        # Assign positive rois to corresponding GT boxes
        # positive overlaps: (n_positive, n_gt_boxes)
        positive_overlaps = torch.index_select(overlaps,
                                               dim=0,
                                               index=positive_ix)
        # roi_gt_box_assignment: (n_positive), best corresponding GT box ids of every ROI
        if positive_overlaps.shape[0] > 0:
            roi_gt_box_assignment = torch.argmax(positive_overlaps, dim=1).to(
                positive_overlaps.device)
        else:
            roi_gt_box_assignment = torch.tensor([], dtype=torch.int64).to(
                positive_overlaps.device)
        # roi_gt_boxes: (n_positive, 4). roi_gt_class_ids: (n_positive)
        roi_gt_boxes = torch.index_select(gt_boxes,
                                          dim=0,
                                          index=roi_gt_box_assignment)
        roi_gt_class_ids = torch.index_select(gt_class_ids,
                                              dim=0,
                                              index=roi_gt_box_assignment)

        # Compute deltas from positive_rois to roi_gt_boxes. (n_positive, 4)
        # TODO: BBOX_STD_DEV?
        deltas = Utils.compute_deltas(positive_rois, roi_gt_boxes)

        # Assign positive ROIs to corresponding GT masks. And permute to (n_positive, 1, height, width)
        permuted_gt_masks = torch.unsqueeze(gt_masks, dim=1)
        roi_gt_masks = torch.index_select(permuted_gt_masks,
                                          dim=0,
                                          index=roi_gt_box_assignment)

        # Get masks in roi boxes. (n_positive, mask_h, mask_w)
        # TODO: normalize_to_mini_mask?
        positive_rois_transformed = transform_coordianates(
            positive_rois, gt_masks.shape[1:])
        box_ids = torch.unsqueeze(torch.arange(0, roi_gt_masks.shape[0]),
                                  dim=1).to(roi_gt_masks.dtype).to(
                                      roi_gt_masks.device)
        positive_rois_transformed = torch.cat(
            [box_ids, positive_rois_transformed], dim=1)
        roi_gt_masks_minibox = ops.roi_align(roi_gt_masks,
                                             positive_rois_transformed,
                                             self.mask_shape)
        # Remove the extra dimension from masks.
        roi_gt_masks_minibox = torch.squeeze(roi_gt_masks_minibox, dim=1)
        # Threshold mask pixels at 0.5 (values are fractional because of ROIAlign) so GT masks
        # are 0 or 1, for use with binary cross entropy loss.
        roi_gt_masks_minibox = torch.round(roi_gt_masks_minibox)

        # Append negative ROIs and pad zeros for negative ROIs' bbox deltas and masks.
        rois = torch.cat([positive_rois, negative_rois], dim=0)
        n_negative = negative_rois.shape[0]
        n_padding = torch.tensor(
            max(self.train_rois_per_image - rois.shape[0], 0))
        # Padding
        rois = torch.nn.functional.pad(rois, pad=[0, 0, 0, n_padding])
        roi_gt_boxes = torch.nn.functional.pad(
            roi_gt_boxes, pad=[0, 0, 0, n_padding + n_negative])
        roi_gt_class_ids = torch.nn.functional.pad(
            roi_gt_class_ids, pad=[0, n_padding + n_negative])
        deltas = torch.nn.functional.pad(deltas,
                                         pad=[0, 0, 0, n_padding + n_negative])
        roi_gt_masks_minibox = torch.nn.functional.pad(
            roi_gt_masks_minibox, pad=[0, 0, 0, 0, 0, n_padding + n_negative])

        # TODO: require grad?
        deltas = deltas.detach()
        roi_gt_masks_minibox = roi_gt_masks_minibox.detach()
        return rois, roi_gt_class_ids, deltas, roi_gt_masks_minibox
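
Utils.compute_overlaps returns the pairwise IoU matrix used above. A sketch of the standard computation, assuming two sets of [y1, x1, y2, x2] boxes:

import torch

def compute_overlaps(boxes1, boxes2):
    # Broadcast (n1, 1) against (1, n2) to get the (n1, n2) IoU matrix.
    y1 = torch.max(boxes1[:, None, 0], boxes2[None, :, 0])
    x1 = torch.max(boxes1[:, None, 1], boxes2[None, :, 1])
    y2 = torch.min(boxes1[:, None, 2], boxes2[None, :, 2])
    x2 = torch.min(boxes1[:, None, 3], boxes2[None, :, 3])
    inter = (y2 - y1).clamp(min=0) * (x2 - x1).clamp(min=0)
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    return inter / (area1[:, None] + area2[None, :] - inter)
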
Example No. 20
    def show_dataset(self):
        for batch in range(self.batchsize):
            denorm_boxes = Utils.denorm_boxes(self.boxes[batch], self.image_shape)

            self.draw_boxes(batch, denorm_boxes, self.class_ids[batch],
                            masks=self.masks[batch], save_dir=self.dataest_savepath)
Example No. 21
    def refine_detections_graph(self, rois, probs, deltas):
        """
        rois: (N, [y1, x1, y2, x2]) in normalized coordinates.
        probs: (N, num_classes). All class probabilities of each roi.
              Note: num_classes includes background.
        deltas: (N, num_classes, [dy, dx, log(dh), log(dw)]). Deltas to all class of each roi.

        return: (detection_max_instance, [y1, x1, y2, x2, class_id, score])
        """
        # Best corresponding class for each roi (0 to n_classes - 1; 0 is background).
        class_ids = torch.argmax(probs, dim=1)  # (N)
        # Best corresponding class scores and deltas.
        class_scores = probs[torch.arange(class_ids.shape[0]),
                             class_ids]  # (N)
        deltas_specific = deltas[torch.arange(class_ids.shape[0]),
                                 class_ids, :]  # (N,4)

        # Apply bounding box deltas TODO: deltas_specific * config.BBOX_STD_DEV?
        refined_rois = Utils.refine_boxes(rois, deltas_specific)
        refined_rois = Utils.clip_boxes(
            refined_rois, self.window.to(device=refined_rois.device))

        if 'refined_rois' not in self.vfm.keys():
            self.vfm['refined_rois'] = refined_rois.unsqueeze(dim=0)
        else:
            self.vfm['refined_rois'] = torch.cat(
                [self.vfm['refined_rois'],
                 refined_rois.unsqueeze(dim=0)],
                dim=0)

        # Filter out background.
        keep = class_ids.gt(0)  # (n)
        # Filter out low-confidence boxes. TODO: confirm the threshold is appropriate.
        conf_keep = class_scores.gt(self.detection_min_confidence)  # (n)
        keep = (keep * conf_keep).nonzero()[:, 0]  # (n)

        # Apply per-class NMS
        # 1. Prepare
        pre_nms_class_ids = class_ids[keep]  # (n)
        pre_nms_scores = class_scores[keep]  # (n)
        pre_nms_rois = refined_rois[keep]  # (n,4)
        unique_pre_nms_class_ids = torch.unique(
            pre_nms_class_ids)  # (n_unique). set of the class ids.

        def nms(class_id):
            """
            Apply Non-Maximum Suppression on ROIs of the given class.

            class_id: int.

            return: (detection_max_instance)
            """
            # Indices of ROIS of the given class
            ixs = pre_nms_class_ids.eq(class_id).nonzero()[:, 0]
            # Apply NMS. class_keep holds indices (into ixs) of the ROIs kept.
            class_keep = ops.nms(pre_nms_rois[ixs],
                                 pre_nms_scores[ixs],
                                 iou_threshold=self.detection_nms_threshold)
            # torchvision's nms doesn't cap the output count, so truncate
            # manually; ops.nms returns indices already sorted by score.
            if class_keep.shape[0] > self.detection_max_instance:
                class_keep = class_keep[0:self.detection_max_instance]
            class_keep = keep[ixs[class_keep]]
            # Pad with -1 so it can stack over ids.
            padding_count = self.detection_max_instance - class_keep.shape[0]
            class_keep = nn.functional.pad(class_keep, [0, padding_count],
                                           value=-1)
            return class_keep

        # 2. Loop over class IDs. (n_unique, detection_max_instance)
        nms_keep = Utils.batch_slice(
            torch.unsqueeze(unique_pre_nms_class_ids, dim=1), nms)
        # 3. Merge results into one dim, and remove -1 padding.
        nms_keep = torch.reshape(nms_keep,
                                 [-1])  # (n_unique * detection_max_instance)
        nms_keep = nms_keep[torch.gt(nms_keep, -1)]  # (n_nms)

        # 4. Compute intersection between keep and nms_keep. TODO: why not just use nms_keep.
        keep = set(keep.cpu().numpy().tolist()).intersection(
            set(nms_keep.cpu().numpy().tolist()))
        keep = torch.tensor(list(keep)).to(nms_keep.device)

        # Keep top detections.
        class_scores_keep = class_scores[keep]
        num_keep = min(class_scores_keep.shape[0], self.detection_max_instance)
        top_ids = torch.topk(class_scores_keep, k=num_keep, sorted=True)[1]
        keep = keep[top_ids]

        # Arrange output as (n_detections, [y1, x1, y2, x2, class_id, score])
        detections = torch.cat([
            refined_rois[keep],
            class_ids[keep].to(refined_rois.dtype).unsqueeze(dim=1),
            class_scores[keep].unsqueeze(dim=1)
        ], dim=1)
        # Pad with zeros up to detection_max_instance.
        padding_count = self.detection_max_instance - detections.shape[0]
        detections = nn.functional.pad(detections, [0, 0, 0, padding_count])
        return detections
Example No. 22
import warnings
warnings.simplefilter('ignore')

from model import Utils
from model import Pix2Pix as p2pModel

if __name__ == '__main__':
	row, cdt = Utils().load_data(dirs="Dataset", dataset_name="nobg_mask")
	p2p = p2pModel(dataA=cdt, dataB=row)
	generator = p2p.build_generator()
	discriminator = p2p.build_discriminator()
	p2pgan = p2p.build_gan(generator, discriminator)


	from keras.utils import plot_model
	plot_model(generator, to_file="gen.png", show_shapes=True)
	plot_model(discriminator, to_file="dis.png", show_shapes=True)
	plot_model(p2pgan, to_file="gan.png", show_shapes=True)


	p2p.train(name="NBGM", models=[generator, discriminator, p2pgan], epochs=250, batch_size=16, sample_interval=5)
Example No. 23
def get_sample_for_optimization(graphs,
                                metrics_function,
                                max_n_frgm=10,
                                n_frgm_per_table=-1,
                                verbose=False):
    annotated_tables, other_fragmentations = list(), list()
    table_counts, count_single, count_multi = list(), list(), list()
    graph_indices = list()
    for gix in range(len(graphs)):
        sheet_graph = graphs[gix]

        # collect and prepare the attributes for each node in this graph
        nodes_attributes = dict()
        for node, data in sheet_graph.nodes_iter(data=True):
            selected_data = dict()
            coordinates = Utils.get_node_region_coordinates(data)
            selected_data['area'] = data['area']
            selected_data['coordinates'] = coordinates
            selected_data['columns'] = list(
                range(coordinates['xmin'], coordinates['xmax']))
            selected_data['rows'] = list(
                range(coordinates['ymin'], coordinates['ymax']))
            selected_data['label'] = data['label']
            nodes_attributes[node] = selected_data

        # identify the tables in this graph
        tables_node_areas = Utils.group_node_area_tuples_by_tableId(
            sheet_graph, one_tbl_rule=True)
        # the ids of the tables in the graph
        table_ids = list(tables_node_areas.keys())
        n_tables = len(
            table_ids) if -1 not in table_ids else len(table_ids) - 1

        # the fragments that represent true tables (a.k.a, the target solution)
        tbl_fragments = list()
        for tbl_id in table_ids:
            table_nodes = set(tables_node_areas[tbl_id].keys())
            if tbl_id == -1:  # a reserved value for independent regions (i.e., located outside the annotated tables)
                # treat these independent regions individually (separately)
                for node_region in table_nodes:
                    tbl_fragments.append({node_region})
            else:
                tbl_fragments.append(table_nodes)

        tmx = metrics_function(nodes_attributes, tbl_fragments,
                               sheet_graph.graph['column widths'],
                               sheet_graph.graph['row heights'])

        # determine the number of random subgraphs to generate
        if n_frgm_per_table > 0:
            n_frgm = len(table_ids) * n_frgm_per_table
            if n_frgm > max_n_frgm:
                n_frgm = max_n_frgm
        else:
            n_frgm = max_n_frgm

        # generate random subgraphs.
        fragmentations = Generator.get_random_fragmentations(
            sheet_graph, n_frgm, as_subgraphs=False)
        for frag in fragmentations:
            fmx = metrics_function(nodes_attributes, frag,
                                   sheet_graph.graph['column widths'],
                                   sheet_graph.graph['row heights'])
            annotated_tables.append(tmx)
            other_fragmentations.append(fmx)
            table_counts.append(n_tables)
            graph_indices.append(gix)

        if n_tables > 1:
            count_multi.append(len(fragmentations))
        else:
            count_single.append(len(fragmentations))

    if verbose:
        print("\nSingle contributed %d" % sum(count_single))
        print("Count single %d" % len(count_single))
        print("Multi contributed %d" % sum(count_multi))
        print("Count multi %d" % len(count_multi))
    return annotated_tables, other_fragmentations, table_counts, graph_indices
Example No. 24
    def __init__(self,
                 image_shape,
                 n_classes,
                 scales=(32, 64, 128, 256, 512),
                 ratios=(0.5, 1, 2),
                 p4_box_size=224.0,
                 mode='train',
                 pretrain=True):
        # TODO: in_channels
        super().__init__()
        self.mode = mode
        self.image_shape = image_shape
        self.n_classes = n_classes

        # For a 256x256 image, boxes mostly range from 20 to 80 px.
        # With strides [4, 8, 16, 32, 64], a ~24 px box should map to P4 (stride 16), giving a feature-map footprint of about 3.
        self.anchor_per_location = len(ratios)

        # default for resnet50
        feature_strides = [4, 8, 16, 32, 64]
        # if the image size is 256, the pyramid shapes are [[64,64], [32,32], [16,16], [8,8], [4,4]]
        pyramid_shapes = [np.array(image_shape) / x for x in feature_strides]
        # anchors = self.anchor_genertor.get_anchors(pyramid_shapes, feature_strides)
        anchors = Utils.generate_pyramid_anchors(scales, ratios,
                                                 pyramid_shapes,
                                                 feature_strides, 1)
        self.anchors = Utils.norm_boxes(
            anchors, image_shape=image_shape)  # (n_anchors, 4)

        self.gt_class_ids = None
        self.gt_boxes = None
        self.gt_masks = None
        self.active_class_ids = None

        # visualization feature map
        self.vfm = {}

        # self.resnet = ResNet50(in_channels, 2048)
        # TODO: channel
        self.resnet = ResNetPyTorch(pretrained=pretrain)
        self.fpn = FPN(2048, 256)
        self.rpn = RPN(256, anchors_per_location=self.anchor_per_location)

        self.proposal_layer = ProposalLayer(post_nms_rois=50,
                                            nms_threshold=0.7,
                                            pre_nms_limit=100)
        self.detection_target_layer = DetectionTargetLayer(
            proposal_positive_ratio=0.33,
            train_proposals_per_image=30,
            mask_shape=[28, 28],
            positive_iou_threshold=0.5)
        self.detection_layer = DetectionLayer(detection_max_instances=3,
                                              detection_nms_threshold=0.3)
        # in channel = fpn out channel, out channel = num_classes
        self.fpn_classifier = FPNClassifier(256,
                                            n_classes,
                                            fc_layers_size=1024,
                                            pool_size=7,
                                            image_shape=image_shape,
                                            p4_box_size=p4_box_size)
        self.fpn_mask = FPNMask(256,
                                n_classes,
                                mask_pool_size=14,
                                image_shape=image_shape,
                                p4_box_size=p4_box_size)
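
Utils.generate_pyramid_anchors is not listed in these examples. A sketch of one pyramid level under the usual Mask R-CNN scheme (one scale per level, every ratio at every feature-map cell), returning (N, [y1, x1, y2, x2]) in pixels:

import numpy as np

def generate_level_anchors(scale, ratios, feature_shape, feature_stride):
    ratios = np.array(ratios)
    heights = scale / np.sqrt(ratios)
    widths = scale * np.sqrt(ratios)
    # Anchor centers, one per feature-map cell, in image coordinates.
    shifts_y = np.arange(feature_shape[0]) * feature_stride
    shifts_x = np.arange(feature_shape[1]) * feature_stride
    shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)
    # Pair every center with every (height, width).
    box_heights, box_centers_y = np.meshgrid(heights, shifts_y)
    box_widths, box_centers_x = np.meshgrid(widths, shifts_x)
    centers = np.stack([box_centers_y, box_centers_x], axis=2).reshape(-1, 2)
    sizes = np.stack([box_heights, box_widths], axis=2).reshape(-1, 2)
    return np.concatenate([centers - 0.5 * sizes, centers + 0.5 * sizes], axis=1)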