Example #1
    def reset_parameters(self):
        for weight in self.parameters():
            stdv = get_threshold(weight.size(0))
            if weight.dim() == 2:
                stdv = get_threshold(weight.size(0), weight.size(1))

            weight.data.uniform_(-stdv, stdv)
Example #2
    def __init__(self,
                 embed_size,
                 hidden_size,
                 output_size,
                 n_layers=1,
                 dropout=0.2):
        super(Decoder, self).__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers

        self.embed = nn.Embedding(output_size, embed_size)
        stdv = get_threshold(output_size, embed_size)
        self.embed.weight.data.uniform_(-stdv, stdv)

        ### self.dropout = nn.Dropout(dropout, inplace=True)
        self.attention = Attention(hidden_size)
        self.gru = nn.GRU(2 * hidden_size + embed_size,
                          hidden_size,
                          n_layers,
                          dropout=dropout)
        self.out = nn.Linear(hidden_size * 3, output_size)
        stdv = get_threshold(hidden_size * 3, output_size)
        self.out.weight.data.uniform_(-stdv, stdv)
        self.out.bias.data.zero_()
Example #3
def test(model, test_list, gt_imgs, gt_masks=None, fast=True):
    model.eval()
    dist_list = []
    for x in test_list:
        with torch.no_grad():
            z, _ = model(x.to(device))
            if model.L == 1:
                dist_list.append(torch.mean(z**2, dim=1).cpu().detach())
            else:
                dist_list.append(torch.mean(z[-1]**2, dim=1).cpu().detach())
    dist_list = torch.cat(dist_list, 0)

    score_map = F.interpolate(dist_list.unsqueeze(1),
                              size=x.size(2),
                              mode='bilinear',
                              align_corners=False).squeeze().numpy()
    score_map = gaussian_smooth(score_map)
    score_map = nomalize(score_map)
    img_scores = score_map.reshape(score_map.shape[0], -1).max(axis=1)

    result = {}
    result['img_aucroc'], result['img_fpr'], result['img_tpr'] = get_roc_auc(
        gt_imgs, img_scores)
    if fast:
        return result['img_aucroc']
    else:
        cls_threshold = get_threshold(gt_imgs, img_scores)
        result['pix_aucroc'], result['pix_fpr'], result[
            'pix_tpr'] = get_roc_auc(gt_masks.flatten(), score_map.flatten())
        sgm_threshold = get_threshold(gt_masks.flatten(), score_map.flatten())
        return result, cls_threshold, sgm_threshold, score_map, img_scores
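A note on get_threshold(gt_imgs, img_scores) in this example: it turns ground-truth labels and anomaly scores into an operating threshold, but the selection criterion is not shown anywhere on this page. A minimal sketch, assuming the common choice of the F1-maximizing point on the precision-recall curve (each project on this page ships its own get_threshold, so this stands in for this example only):

import numpy as np
from sklearn.metrics import precision_recall_curve

def get_threshold(gt, scores):
    # Assumed implementation: pick the cutoff that maximizes F1.
    # precision/recall have one more entry than thresholds, so drop it.
    precision, recall, thresholds = precision_recall_curve(gt, scores)
    f1 = 2 * precision * recall / (precision + recall + 1e-12)
    return thresholds[np.argmax(f1[:-1])]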
Example #4
    def __init__(self, hidden_size):
        super(Attention, self).__init__()
        self.hidden_size = hidden_size
        self.attn = nn.Linear(hidden_size * 3, hidden_size)
        stdv = get_threshold(hidden_size * 3, hidden_size)
        self.attn.weight.data.uniform_(-stdv, stdv)
        self.attn.bias.data.zero_()

        self.v = nn.Parameter(torch.rand(hidden_size))
        # stdv = 1. / math.sqrt(self.v.size(0))
        stdv = get_threshold(self.v.size(0))
        self.v.data.uniform_(-stdv, stdv)
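The commented-out line above, stdv = 1. / math.sqrt(self.v.size(0)), hints at what get_threshold computes in the PyTorch snippets on this page: a uniform-initialization bound derived from the layer dimensions. A minimal sketch under that assumption; the two-argument rule is a guess (it could just as well be a Xavier-style sqrt(6 / (fan_in + fan_out)) bound):

import math

def get_threshold(dim1, dim2=None):
    # Assumed reconstruction of the 1/sqrt(fan) bound hinted at by the
    # commented-out line above; averaging the two dimensions is a guess.
    fan = dim1 if dim2 is None else (dim1 + dim2) / 2.0
    return 1.0 / math.sqrt(fan)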
Example #5
def check_validate(vid):
    print("VALIDATING")
    unvalidated = tu.fetch_unvalidated_problem()
    if not unvalidated:
        return
    trained_model, model_id, problem_location = unvalidated
    saved_model = tf.keras.models.load_model(trained_model)

    (x_train, y_train), (x_test, y_test) = tu.fetch_active_problem()

    threshold = tu.get_threshold()
    hashy, transactions = tu.generate_input_hash()
    s = time.time()
    #check that the parameterization is what it should be
    if not check_params(saved_model, hashy):
        return False
    #check that the performance is what it should be
    if not check_performance(saved_model, threshold, x_test, y_test):
        return False
    e = time.time()
    #if both check out, the model is good
    metadata = {
        'pid': model_id,
        'vid': vid,
        'timestamp': time.time(),
        'location': problem_location,
        'hashy': hashy,
        'transactions': transactions,
    }
    print("REPORTING GOOD AT: " + problem_location)
    tu.report_good_model(metadata)
    return e - s
Example #6
def check_mine():
    """mines the current boy"""
    (x_train, y_train), (x_test, y_test) = tu.fetch_active_problem()
    threshold = tu.get_threshold()
    hexy, transactions = tu.generate_input_hash()
    #train the model until the threshold is met
    i = 0
    c = 64
    a = 0
    while (a < threshold):
        if i > 5:
            print("Failed")
            return
        model = tu.generate_model(hexy, c)
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.fit(x_train, y_train, epochs=5)
        results = model.evaluate(x_test, y_test, verbose=2)
        a = results[1]
        i += 1
        c = c * 2

    #save the model
    fname = 'my_result' + str(random.randint(0, 10000)) + '.h5'
    model.save('unvalidated/' + fname)
    print("WROTE TO unvalidated/" + fname)
    tu.write_unvalidated_problem('unvalidated/' + fname, transactions, hexy)
Example #7
    def __init__(self,
                 input_size,
                 embed_size,
                 hidden_size,
                 n_layers=1,
                 dropout=0.2):
        super(Encoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.embed = nn.Embedding(input_size, embed_size)
        ## stdv = 1. / math.sqrt(embed_size)
        ## self.embed.weight.data.normal_(0, stdv)
        stdv = get_threshold(input_size, embed_size)
        self.embed.weight.data.uniform_(-stdv, stdv)

        # forward_gru = GRUCell(embed_size, hidden_size, dropout=dropout)
        # backward_gru = GRUCell(embed_size, hidden_size, dropout=dropout)
        # self.gru = BiGRU(forward_gru, backward_gru)

        self.gru = nn.GRU(embed_size,
                          hidden_size,
                          n_layers,
                          dropout=dropout,
                          bidirectional=True)
Example #8
    def find_page(self, im):
        """
        Finds and returns the test box within a given image.

        Args:
            im (numpy.ndarray): An ndarray representing the entire test image.

        Returns:
            numpy.ndarray: An ndarray representing the test box in the image.

        """
        # Convert image to grayscale then blur to better detect contours.
        imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
        threshold = utils.get_threshold(imgray)

        # Find contour for entire page.
        contours, _ = cv.findContours(threshold, cv.RETR_EXTERNAL,
                                      cv.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=cv.contourArea, reverse=True)

        if len(contours) > 0:
            # Approximate the contour.
            for contour in contours:
                peri = cv.arcLength(contour, True)
                approx = cv.approxPolyDP(contour, 0.02 * peri, True)

                # Verify that contour has four corners.
                if len(approx) == 4:
                    page = approx
                    break
        else:
            return None

        # Apply perspective transform to get top down view of page.
        return four_point_transform(imgray, page.reshape(4, 2))
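Here utils.get_threshold(imgray) must return a binary image that cv.findContours can consume. A minimal sketch, assuming a Gaussian blur followed by Otsu binarization; a later example calls the helper with an extra constant, which suggests the real one may use cv.adaptiveThreshold instead:

import cv2 as cv

def get_threshold(imgray):
    # Assumed implementation: blur, then inverse Otsu binarization so the
    # page content becomes white foreground for contour detection.
    blurred = cv.GaussianBlur(imgray, (5, 5), 0)
    _, thresh = cv.threshold(blurred, 0, 255,
                             cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
    return thresh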
Example #9
def get_level_others_items(session, q_id, est_theta, shadow_bank, ans, db):
    """
    Select an item from one of the later levels.
    :param session:
    :param q_id:
    :param ans:
    :param est_theta: estimated latent trait
    :param shadow_bank: shadow item-bank class
    :return: the question object
    """

    # Level (stage) the current test is at
    level = session['%s_stage' % q_id]

    # Items already answered at this level
    # (adapted for the JSON field);
    # their IDs must not appear among the candidates
    a_level = int(level)
    not_in_index_list = get_has_answered_que_id_list(ans, a_level=a_level)

    # The drawn item
    que = yield shadow_bank(q_id, a_level, est_theta, not_in_index_list,
                            db).get_que()

    # Save the item parameters to the session
    session['%s_a' % q_id].append(que.slop)
    session['%s_b' % q_id].append(get_threshold(que))

    # Return the item
    raise gen.Return(que)
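In the IRT examples, get_threshold(que) reads the item's threshold (difficulty) parameters off the question row; the SQL in a later example orders by a threshold column, so such a field exists on the model. A minimal sketch, assuming graded-response thresholds are stored as a comma-separated string (the storage format is purely an assumption):

def get_threshold(que):
    # Assumed implementation: parse the stored threshold field into floats.
    return [float(b) for b in str(que.threshold).split(',')]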
Example #10
    def act_draw_boxes(self, image, threshold_constant):
        """
        Converts top and bottom lines into boxes and draws them onto the page.
        """
        threshold = utils.get_threshold(image, threshold_constant)
        # cv.imshow('', threshold)
        # cv.waitKey()
        contours, _ = cv.findContours(threshold, cv.RETR_EXTERNAL,
                                      cv.CHAIN_APPROX_SIMPLE)
        contours = self.sort_contours_by_width(contours)
        line_contours = sorted(
            self.get_line_contours(contours[:20], image, 5, 6),
            key=self.get_line_contour_y)
        
        # colorim = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
        # cv.drawContours(colorim,line_contours, -1, (133,255,255), 3)
        # cv.imshow('', colorim)
        # cv.waitKey()
        # we add one because we don't grade the first box
        num_expected_boxes = len(self.config['boxes'])
        h, w = image.shape
        min_box_height = h / (num_expected_boxes + 1) / 2
        prev_contour = line_contours[0]
        boxes_to_draw = deque()

        areas_to_erase = []
        x = 1
        for c in line_contours:
            # calculating height by finding difference beween y values.
            current_box_height = self.get_line_contour_y(c) - self.get_line_contour_y(prev_contour)
            erase_height = int(h*0.013)
            line_separation = round(h*0.0034)
            if current_box_height > min_box_height:
                ty = self.get_line_contour_y(prev_contour)
                by = self.get_line_contour_y(c)
                boxes_to_draw.append(np.array(([x, ty+line_separation],
                                               [w-1, ty+line_separation],
                                               [w-1, by-line_separation],
                                               [x, by-line_separation]),
                                              dtype=np.int32))
                areas_to_erase.append(np.array(([x, ty-erase_height], [w-1, ty-erase_height],
                                                [w-1, ty+erase_height], [x, ty+erase_height]), dtype=np.int32))
            prev_contour = c
        # Make sure that there's a box at the top of the page
        top_box_y_pos = boxes_to_draw[0][0][1]
        bottom_y_pos = boxes_to_draw[-1][-1][1]
        image_height = image.shape[0]
        if top_box_y_pos <= image_height*0.01:
            boxes_to_draw.popleft()
        if bottom_y_pos < image_height*0.99:
            x, ty, by = (0, bottom_y_pos, image_height)
            boxes_to_draw.append(np.array(([0, ty + 5], [w, ty + 5],
                                           [w, by - 5], [0, by - 5]),
                                          dtype=np.int32))
        if len(boxes_to_draw) < num_expected_boxes:
            return None
        # need to erase at the very end of the page too
        areas_to_erase.append(np.array(([x, by - erase_height], [w, by - erase_height],
                                        [w, by + erase_height], [x, by + erase_height]),
                                       dtype=np.int32))
        cv.drawContours(image, areas_to_erase, -1, 255, -1)
        # the first box contains student info, no answers
        cv.drawContours(image, boxes_to_draw, -1, 0, 1)
        return image
Example #11
    def __init__(self,
                 embed_size,
                 hidden_size,
                 output_size,
                 n_layers=1,
                 dropout=0.2):
        super(Decoder, self).__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers

        self.embed = nn.Embedding(output_size, embed_size)
        stdv = get_threshold(output_size, embed_size)
        self.embed.weight.data.uniform_(-stdv, stdv)
        ## stdv = 1. / math.sqrt(embed_size)
        ## self.embed.weight.data.normal_(0, stdv)

        ### self.dropout = nn.Dropout(dropout, inplace=True)
        # self.init_state = nn.Linear(hidden_size, hidden_size) ## Edit by Wu Kaixin 1/14
        # stdv = get_threshold(hidden_size, hidden_size)
        # self.init_state.weight.data.uniform_(-stdv, stdv)
        #vself.init_state.bias.data.zero_()

        self.attention = Attention(hidden_size)
        # self.gru = nn.GRU(2 * hidden_size + embed_size, hidden_size,
        #                   n_layers, dropout=dropout)
        self.gru = nn.GRU(hidden_size + embed_size,
                          hidden_size,
                          n_layers,
                          dropout=dropout)

        self.att_hidden_state = nn.Linear(3 * hidden_size, hidden_size)
        stdv = get_threshold(3 * hidden_size, hidden_size)
        self.att_hidden_state.weight.data.uniform_(-stdv, stdv)
        self.att_hidden_state.bias.data.zero_()

        # self.out = nn.Linear(hidden_size * 3, output_size)
        # stdv = get_threshold(hidden_size * 3, output_size)

        self.out = nn.Linear(hidden_size, output_size)
        stdv = get_threshold(hidden_size, output_size)
        self.out.weight.data.uniform_(-stdv, stdv)
        self.out.bias.data.zero_()
Example #12
    def get_que_then_redirect(self):
        q = self.q
        q_id = self.q_id
        que_id = self.que_id
        db = self.db
        ans = self.ans
        session = self.session
        # Set the test-restart flag to False
        session['is_%s_re_start' % q_id] = False
        # Stage-one item selection below
        if session['%s_stage' % q_id] == 1:
            yield db.execute("UPDATE answer SET order_answer=%s, score_answer=%s WHERE id=%s",
                             (Json(ans.order_answer), Json(ans.score_answer), ans.aid))
            raise gen.Return('/cat/%s' % q_id)
        else:
            # Fetch the parameters of answered items
            self.a = np.array(session['%s_a' % q_id])
            self.b = np.array(session['%s_b' % q_id])
            self.score = np.array(session['%s_score' % q_id])

            # Estimate the latent trait
            self.theta = self.get_theta()
            # Compute the error
            info = self.get_info()

            # Save the error and latent-trait values
            # (adapted for the JSON field)
            ans.score_answer[str(que_id)].update({'info': info, 'theta': self.theta})
            # The examinee's answering flow
            flow = Flow(flow=q.flow, name=session.session_key)

            if session['%s_stage' % q_id] == flow.level_len + 1:
                # The condition above is the stopping rule
                yield db.execute("UPDATE answer SET theta=%s, info=%s, has_finished=%s,"
                                 "order_answer=%s, score_answer=%s WHERE id=%s",
                                 (self.theta, info, True, Json(ans.order_answer), Json(ans.score_answer), ans.aid))

                # Delete all test-related session keys
                del_session(session, q_id)

                # Return to the questionnaire list page
                raise gen.Return('/result/%s' % q_id)
            else:
                # Stage-two item selection
                que = yield get_level_others_items(session, q_id, self.theta, self.get_shadow_bank(), ans, db)
                session['q_%s_id' % q_id] = que
                level = session['%s_stage' % q_id]
                index_key = session['%s_step' % q_id] + 1
                ans.score_answer[str(que.id)] = {'a_level': level,
                                                 'slop': que.slop,
                                                 'threshold': get_threshold(que)}
                session['%s_next_item' % q_id] = que
                ans.order_answer[index_key] = que.id
                yield db.execute("UPDATE answer SET order_answer=%s, score_answer=%s WHERE id=%s",
                                 (Json(ans.order_answer), Json(ans.score_answer), ans.aid))
                raise gen.Return('/cat/%s' % q_id)
Example #13
 def get_count_and_info_values_list(self, shadow_questions):
     a_array = np.array([])
     b_array = np.array([])
     count_array = np.array([])
     for que in shadow_questions:
         a_array = np.append(a_array, que.slop)
         b_array = np.append(b_array, get_threshold(que))
         count_array = np.append(count_array, que.count)
     # Reshape the slopes into a 2-D array below; otherwise the computation raises an exception
     a_array.shape = a_array.shape[0], 1
     return count_array, GrmIRTInfo(a_array, b_array, self.est_theta).get_item_info_list()
Example #14
def create_duo_word_clouds(infold, outfold, sub, city1, city2, stopwords):
    vocab1 = utils.load_vocab('vocabs/{}/{}.vocab'.format(sub, city1), 3)
    vocab2 = utils.load_vocab('vocabs/{}/{}.vocab'.format(sub, city2), 3)

    thres1, thres2 = utils.get_threshold(sub, city1, city2)
    results1, results2 = utils.compare_vocabs(vocab1, vocab2, city1, city2,
                                              thres1, thres2)
    text1 = create_text_wc(results1)
    text2 = create_text_wc(results2)

    # frequencies1 = utils.filter_stopwords(results1, stopwords, filter_unprintable=True)
    # frequencies2 = utils.filter_stopwords(results2, stopwords, filter_unprintable=True)
    create_duo_word_clouds_helper(outfold, sub, text1, city1, text2, city2,
                                  stopwords)
Example #15
def train(data_loader, net, loss, epoch, optimizer, get_lr, save_dir):
    start_time = time.time()
    net.train()
    lr = get_lr(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    total_train_loss = []
    class_predict = []
    class_target = []
    for i, sample in enumerate(data_loader):
        data = sample['image']
        target_c = sample['label_c']
        target_s = sample['label_s']
        data = data.to(DEVICE)
        target_c = target_c.to(DEVICE)
        target_s = target_s.to(DEVICE)
        output_s, output_c = net(data)
        optimizer.zero_grad()
        cur_loss, _, _, c_p = loss(output_s, output_c, target_s, target_c)
        total_train_loss.append(cur_loss.item())
        class_target.append(target_c.detach().cpu().numpy())
        class_predict.append(c_p.detach().cpu().numpy())
        cur_loss.backward()
        optimizer.step()

    logging.info(
        'Epoch[%d], Batch [%d], total loss is %.6f, using %.1f s!' %
        (epoch, i, np.mean(total_train_loss), time.time() - start_time))
    total_train_class_predict = np.concatenate(class_predict, 0)
    total_train_class_target = np.concatenate(class_target, 0)
    adaptive_thresholds = get_threshold(total_train_class_predict,
                                        total_train_class_target, 0.995)
    cur_precision, _ = metric(total_train_class_predict,
                              total_train_class_target, adaptive_thresholds)
    logging.info(
        'Epoch[%d], [precision=%.4f, -->%.3f, -->%.3f, -->%.3f, -->%.3f]' %
        (epoch, np.mean(cur_precision), np.mean(cur_precision[0]),
         np.mean(cur_precision[1]), np.mean(
             cur_precision[2]), np.mean(cur_precision[3])))
    logging.info('the adaptive thresholds is [%.4f, %.4f, %.4f, %.4f]' %
                 (adaptive_thresholds[0], adaptive_thresholds[1],
                  adaptive_thresholds[2], adaptive_thresholds[3]))
    return np.mean(total_train_loss), adaptive_thresholds
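get_threshold(predictions, targets, 0.995) here returns one adaptive threshold per class. A plausible sketch, assuming it picks, per class, the lowest score cutoff whose precision on the accumulated training predictions reaches the target; the exact criterion is an assumption:

import numpy as np

def get_threshold(predict, target, precision_target):
    # Assumed implementation: per-class cutoff reaching the requested
    # precision, found by sweeping scores in descending order.
    thresholds = []
    for c in range(predict.shape[1]):
        scores, labels = predict[:, c], target[:, c]
        order = np.argsort(-scores)
        tp = np.cumsum(labels[order])
        precision = tp / (np.arange(len(scores)) + 1.0)
        ok = np.where(precision >= precision_target)[0]
        thresholds.append(scores[order][ok[-1]] if len(ok) else scores.max())
    return np.array(thresholds)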
Example #16
    def __init__(self, hidden_size):
        '''
        :param hidden_size: the size of hidden layer
        '''
        self.hidden_size = hidden_size
        self.threshold = utils.get_threshold(hidden_size, hidden_size)

        # W = np.random.randn(-np.sqrt(3./hidden_size), np.sqrt(3./hidden_size), (3, hidden_size, hidden_size))
        # U = np.random.randn(-np.sqrt(3./hidden_size), np.sqrt(3./hidden_size), (3, hidden_size, hidden_size))       
        # b = np.zeros((3, hidden_size))
        W = self.threshold * np.random.randn(3, hidden_size, hidden_size)
        U = self.threshold * np.random.randn(3, hidden_size, hidden_size)
        b = np.zeros((3, hidden_size))

        self.W = theano.shared(name="W", value=W.astype(theano.config.floatX), borrow=True)
        self.U = theano.shared(name="U", value=U.astype(theano.config.floatX), borrow=True)
        self.b = theano.shared(name="b", value=b.astype(theano.config.floatX), borrow=True)

        self.params = [self.W, self.U, self.b]
Example #17
 def _record_snapshot(self, pi, yprop, sprop, d):
     """Compute snapshot of policy and append to current history."""
     # policy
     if self.policy_type == "logistic":
         self.deployed["pis"].append(LogisticPolicy(pi, self.cost))
     elif self.policy_type == "deterministic":
         self.deployed["pis"].append(DeterministicThreshold(pi, self.cost))
     elif self.policy_type == "fixed":
         self.deployed["pis"].append(pi)
     else:
         raise RuntimeError(
             f"Cannot record full snapshot for policy {self.policy_type}"
         )
     # utility
     if pi is not None:
         utility = utils.utility(self.td, pi, self.cost, self.n_util_estim)
     else:
         utility = np.nan
     self.deployed["utilities"].append(utility)
     # threshold
     if pi is not None:
         if "deterministic" in pi.type:
             tmp_cost = self.cost
         else:
             tmp_cost = None
         threshold = utils.get_threshold(pi.theta, tmp_cost)
         if threshold is not None:
             self.deployed["thresholds"].append(threshold)
     # test metrics
     xtest, ytest, stest = self.td.sample_all(self.n_util_estim)
     if pi is not None:
         dtest = pi.sample(xtest)
     else:
         dtest = ytest.copy()
     self.test_history.snapshot(ytest, dtest, stest)
     # reaped utility and metrics
     if yprop is not None and sprop is not None and d is not None:
         # reaped utility
         reaped_utility = np.sum(yprop[d] - self.cost) / self.n_samples
         self.deployed["reaped_utilities"].append(reaped_utility)
         # reaped metrics
         self.reaped_history.snapshot(yprop, d, sprop)
Example #18
    def get_box(self):
        """
        Finds and returns the contour for this test answer box.

        Returns:
            numpy.ndarray: An ndarray representing the answer box in
                the test image.

        """
        # Blur and threshold the page, then find boxes within the page.
        threshold = utils.get_threshold(self.page)
        contours, _ = cv.findContours(threshold, cv.RETR_TREE,
                                      cv.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=cv.contourArea, reverse=True)

        # Iterate through contours until the correct box is found.
        for contour in contours:
            if self.is_box(contour, threshold):
                return utils.get_transform(contour, threshold)

        return None
Example #19
    def detect_roles(unclear_role):
        unclear_role = normalize(unclear_role)

        for role in ALL_ROLES:
            examples = role['examples']
            for example in examples:
                if unclear_role == example:
                    return set([role["name"]])

        terms = string_to_list(unclear_role)
        roles = set([])

        for term in terms:
            if term in TERMS_ROLES:
                roles = roles | set([TERMS_ROLES[term]])

        if roles:
            return roles

        for term in terms:
            threshold = get_threshold(len(term))
            best_fit_term = ""

            for example, role in TERMS_ROLES.items():
                dis = distance(example, term)

                if dis <= threshold:
                    threshold = dis
                    best_fit_term = example

            if best_fit_term:
                roles = roles | set([TERMS_ROLES[best_fit_term]])

        if roles:
            return roles

        return set(["UNCERTAIN"])
Example #20
    def __init__(self,
                 input_size,
                 embed_size,
                 hidden_size,
                 n_layers=1,
                 dropout=0.2):
        super(Encoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.embed = nn.Embedding(input_size, embed_size)
        stdv = get_threshold(input_size, embed_size)
        self.embed.weight.data.uniform_(-stdv, stdv)

        # self.gru = nn.GRU(embed_size, hidden_size, n_layers,
        #                  dropout=dropout, bidirectional=True)
        self.gru_forward = nn.GRU(embed_size,
                                  hidden_size,
                                  n_layers,
                                  dropout=dropout)
        self.gru_backward = nn.GRU(embed_size,
                                   hidden_size,
                                   n_layers,
                                   dropout=dropout)
Example #21
                              mode='bilinear',
                              align_corners=False).squeeze().numpy()

    # if cf.gaus_smooth:
    score_map = gaussian_smooth(score_map)
    # if cf.nomalize:
    score_map = nomalize(score_map)
    # image-level anomaly score
    img_scores = score_map.reshape(score_map.shape[0], -1).max(axis=1)

    result = {}

    gt_list = np.asarray(gt_list)
    result['img_aucroc'], result['img_fpr'], result['img_tpr'] = get_roc_auc(
        gt_list, img_scores)
    cls_threshold = get_threshold(gt_list, img_scores)

    gt_mask = np.asarray(gt_mask_list)
    result['pix_aucroc'], result['pix_fpr'], result['pix_tpr'] = get_roc_auc(
        gt_mask.flatten(), score_map.flatten())
    sgm_threshold = get_threshold(gt_mask.flatten(), score_map.flatten())

    print('%s | image ROCAUC: %.3f | pixel ROCAUC: %.3f\n' %
          (class_name, result['img_aucroc'], result['pix_aucroc']))
    results[class_name] = result
    if cf.vis:
        pic_path = os.path.join(cf.save_path, cf.experiment_name, 'pictures')
        os.makedirs(pic_path, exist_ok=True)
        plt_fig(test_imgs, score_map, img_scores, gt_mask_list, sgm_threshold,
                cls_threshold, pic_path, class_name)
Example #22
def main(opt):
    attr_list = utils.get_all_attr()
    attr_name = attr_list[opt['attribute']]

    #print(attr_name)
    print(opt)

    if opt['experiment'] == 'baseline':
        train = create_dataset_actual(
            opt['data_setting']['path'],
            opt['data_setting']['attribute'],
            opt['data_setting']['protected_attribute'],
            opt['data_setting']['params_real_train'],
            opt['data_setting']['augment'],
            CelebaDataset,
            number=opt['number'])

        val = create_dataset_actual(opt['data_setting']['path'],
                                    opt['data_setting']['attribute'],
                                    opt['data_setting']['protected_attribute'],
                                    opt['data_setting']['params_real_val'],
                                    False,
                                    CelebaDataset,
                                    split='valid')
        val_weight = None

        test = create_dataset_actual(
            opt['data_setting']['path'],
            opt['data_setting']['attribute'],
            opt['data_setting']['protected_attribute'],
            opt['data_setting']['params_real_val'],
            False,
            CelebaDataset,
            split='test')

    elif opt['experiment'] == 'model':
        train = create_dataset_all(opt['data_setting']['real_params'],
                                   opt['data_setting']['fake_params'],
                                   opt['data_setting']['params_train'],
                                   opt['data_setting']['augment'],
                                   CelebaDataset,
                                   split='train')

    elif opt['experiment'] == 'model_inv':
        train = create_dataset_inv(opt['data_setting']['real_params'],
                                   opt['data_setting']['fake_params'],
                                   opt['data_setting']['params_train'],
                                   opt['data_setting']['augment'],
                                   CelebaDataset,
                                   split='train')

    elif opt['experiment'] == 'fake_only':
        train = create_dataset_reflections(opt['data_setting']['fake_params'],
                                           opt['data_setting']['params_train'],
                                           opt['data_setting']['augment'],
                                           CelebaDataset)

    if opt['experiment'] in ['model', 'model_inv', 'fake_only']:
        val = create_dataset_actual(
            opt['data_setting']['real_params']['path'],
            opt['data_setting']['real_params']['attribute'],
            opt['data_setting']['real_params']['protected_attribute'],
            opt['data_setting']['params_val'],
            False,
            CelebaDataset,
            split='valid')

        val_weight = utils.compute_class_weight(val, opt['device'],
                                                opt['dtype']).cpu().numpy()

        test = create_dataset_actual(
            opt['data_setting']['real_params']['path'],
            opt['data_setting']['real_params']['attribute'],
            opt['data_setting']['real_params']['protected_attribute'],
            opt['data_setting']['params_val'],
            False,
            CelebaDataset,
            split='test')

    # Train the attribute classifier
    save_path = opt['save_folder'] + '/best.pth'
    save_path_curr = opt['save_folder'] + '/current.pth'
    if not opt['test_mode']:
        print('Starting to train model...')
        model_path = None
        if path.exists(save_path_curr):
            print('Model exists, resuming training')
            model_path = save_path_curr
        AC = attribute_classifier(opt['device'],
                                  opt['dtype'],
                                  modelpath=model_path)
        for i in range(AC.epoch, opt['total_epochs']):
            AC.train(train)
            acc = AC.check_avg_precision(val, weights=val_weight)
            if (acc > AC.best_acc):
                AC.best_acc = acc
                AC.save_model(save_path)
            AC.save_model(save_path_curr)

    AC = attribute_classifier(opt['device'], opt['dtype'], modelpath=save_path)
    val_targets, val_scores = AC.get_scores(val)
    test_targets, test_scores = AC.get_scores(test)

    with open(opt['save_folder'] + '/val_scores.pkl', 'wb+') as handle:
        pickle.dump(val_scores, handle)
    with open(opt['save_folder'] + '/val_targets.pkl', 'wb+') as handle:
        pickle.dump(val_targets, handle)
    with open(opt['save_folder'] + '/test_scores.pkl', 'wb+') as handle:
        pickle.dump(test_scores, handle)
    with open(opt['save_folder'] + '/test_targets.pkl', 'wb+') as handle:
        pickle.dump(test_targets, handle)

    cal_thresh = utils.calibrated_threshold(val_targets[:, 0], val_scores)
    f1_score, f1_thresh = utils.get_threshold(val_targets[:, 0], val_scores)
    val_pred = np.where(val_scores > cal_thresh, 1, 0)
    test_pred = np.where(test_scores > cal_thresh, 1, 0)

    ap, ap_std = utils.bootstrap_ap(val_targets[:, 0], val_scores)
    deo, deo_std = utils.bootstrap_deo(val_targets[:, 1], val_targets[:, 0],
                                       val_pred)
    ba, ba_std = utils.bootstrap_bias_amp(val_targets[:, 1], val_targets[:, 0],
                                          val_pred)
    kl, kl_std = utils.bootstrap_kl(val_targets[:, 1], val_targets[:, 0],
                                    val_scores)

    val_results = {
        'AP': ap,
        'AP_std': ap_std,
        'DEO': deo,
        'DEO_std': deo_std,
        'BA': ba,
        'BA_std': ba_std,
        'KL': kl,
        'KL_std': kl_std,
        'f1_thresh': f1_thresh,
        'cal_thresh': cal_thresh,
        'opt': opt
    }

    print('Validation results: ')
    print('AP : {:.1f} +- {:.1f}'.format(100 * ap, 200 * ap_std))
    print('DEO : {:.1f} +- {:.1f}'.format(100 * deo, 200 * deo_std))
    print('BA : {:.1f} +- {:.1f}'.format(100 * ba, 200 * ba_std))
    print('KL : {:.1f} +- {:.1f}'.format(kl, 2 * kl_std))

    with open(opt['save_folder'] + '/val_results.pkl', 'wb+') as handle:
        pickle.dump(val_results, handle)

    ap, ap_std = utils.bootstrap_ap(test_targets[:, 0], test_scores)
    deo, deo_std = utils.bootstrap_deo(test_targets[:, 1], test_targets[:, 0],
                                       test_pred)
    ba, ba_std = utils.bootstrap_bias_amp(test_targets[:, 1],
                                          test_targets[:, 0], test_pred)
    kl, kl_std = utils.bootstrap_kl(test_targets[:, 1], test_targets[:, 0],
                                    test_scores)

    test_results = {
        'AP': ap,
        'AP_std': ap_std,
        'DEO': deo,
        'DEO_std': deo_std,
        'BA': ba,
        'BA_std': ba_std,
        'KL': kl,
        'KL_std': kl_std,
        'f1_thresh': f1_thresh,
        'cal_thresh': cal_thresh,
        'opt': opt
    }

    print('Test results: ')
    print('AP : {:.1f} +- {:.1f}'.format(100 * ap, 200 * ap_std))
    print('DEO : {:.1f} +- {:.1f}'.format(100 * deo, 200 * deo_std))
    print('BA : {:.1f} +- {:.1f}'.format(100 * ba, 200 * ba_std))
    print('KL : {:.1f} +- {:.1f}'.format(kl, 2 * kl_std))

    with open(opt['save_folder'] + '/test_results.pkl', 'wb+') as handle:
        pickle.dump(test_results, handle)
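Unlike the single-value variants above, utils.get_threshold(val_targets[:, 0], val_scores) here returns a pair: the best F1 and the threshold achieving it. A plausible sketch that scans the unique scores as candidate cutoffs (the candidate grid is an assumption):

import numpy as np
from sklearn.metrics import f1_score

def get_threshold(targets, scores):
    # Assumed implementation: exhaustive scan for the F1-optimal cutoff.
    best_f1, best_thresh = 0.0, 0.5
    for t in np.unique(scores):
        f1 = f1_score(targets, scores > t)
        if f1 > best_f1:
            best_f1, best_thresh = f1, float(t)
    return best_f1, best_thresh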
Example #23
 def reset_parameters(self):
     stdv = get_threshold(self.input_size)
     for weight in self.parameters():
         weight.data.uniform_(-stdv, stdv)
Example #24
    def __init__(self, vocab_size, hidden_size, lr, batch_size):

        self.threshold = utils.get_threshold(hidden_size, hidden_size)
        self.wordEmbedding = theano.shared(name="wordEmbedding", value=utils.init_randn(vocab_size, hidden_size), 
            borrow=True)

        W = self.threshold * np.random.randn(3, hidden_size, hidden_size)
        U = self.threshold * np.random.randn(3, hidden_size, hidden_size)
        b = np.zeros((3, hidden_size))

        self.W = theano.shared(name="W", value=W.astype(theano.config.floatX), borrow=True)
        self.U = theano.shared(name="U", value=U.astype(theano.config.floatX), borrow=True)
        self.b = theano.shared(name="b", value=b.astype(theano.config.floatX), borrow=True)

        self.V = theano.shared(name="V", value=np.zeros((hidden_size, vocab_size), dtype=theano.config.floatX), 
            borrow=True)

        self.lr = lr
        self.params = [self.wordEmbedding, self.W, self.U, self.b, self.V]

        print 'ok1' 
        index = T.imatrix()               # (max_sentence_size, batch_size)
        x = self.wordEmbedding[index]     # (max_sentence_size, batch_size, hidden_size)
        mask = T.imatrix("mask")          # (max_sentence_size, batch_size)  mask of input x
        y_expect = T.imatrix()            # (max_sentence_size, batch)

        h0 = T.alloc(np.asarray(0., dtype=theano.config.floatX), batch_size, hidden_size)
        def oneStep(x, h_tm1):
            print 'ok3'
            z = T.nnet.sigmoid(T.dot(x, self.W[0]) + T.dot(h_tm1, self.U[0]) + self.b[0])  # (batch_size, hidden_size)
            r = T.nnet.sigmoid(T.dot(x, self.W[1]) + T.dot(h_tm1, self.U[1]) + self.b[1])  # (batch_size, hidden_size)
            h_ = T.tanh(T.dot(x, self.W[2]) + T.dot(r*h_tm1, self.U[2]) + self.b[2])       # (batch_size, hidden_size)
            h_t = (1. - z)*h_tm1 + z*h_
            print 'ok4'                                        # (batch_size, hidden_size)

            # h_t = m[:, None]*h_t + (1.0 - m)[:, None]*h_tm1
            # h_t = T.cast(h_t, theano.config.floatX)

            return h_t

        print 'ok2'
        h, updates = theano.scan(fn=oneStep,
                                sequences=x,
                                outputs_info=h0)

        print type(h), h.type 
        temp = T.dot(h, self.V)
        y, updates = theano.scan(fn=lambda x: T.nnet.softmax(x),
                 sequences=temp)
        print type(y), y.type
        print "ok5"
        def calcuCost(y_t, y_expect_t, msk):
            return T.sum((T.log(y_t)[T.arange(y_t.shape[0]), y_expect_t]) * msk)

        loss, updates = theano.scan(fn=calcuCost,
                                    sequences=[y, y_expect, mask])
        print "ok6"
        probSum = T.sum(loss)
        totalNum = T.sum(mask)
        # cost = T.sum(loss) / T.sum(mask)
        cost = probSum / totalNum

        paramsGrads = [T.grad(cost, param) for param in self.params]
        paramsUpdates = [(param, param + self.lr * g) for param, g in zip(self.params, paramsGrads)]
        self.train = theano.function(inputs=[index, y_expect, mask],
                                     outputs=[cost, probSum, totalNum],
                                     updates=paramsUpdates)
Example #25
            label_set.append(label.cpu().numpy())
            valid_loss += loss.item()

        valid_loss = valid_loss / len(dev_X)
        pred_set = np.concatenate(pred_set, axis=0)
        label_set = np.concatenate(label_set, axis=0)
        # for i in range(len(pred_set)):
        #     if valid_dataset.type_error[i]==0:
        #         pred_set[i,0] = 0
        # top_class = np.argmax(pred_set, axis=1)
        # equals = top_class == label_set
        # accuracy = np.mean(equals)
        # print('acc', accuracy)
        k = np.array(valid_dataset.gap)

        INFO_THRE, thre_list = get_threshold(pred_set[k == 1],
                                             label_set[k == 1])
        INFO_THRE, thre_list = get_threshold(pred_set[k == 0],
                                             label_set[k == 0])
        INFO_THRE, thre_list = get_threshold(pred_set, label_set)

        print('round', round, 'epoch', epoch,
              'train loss %f, val loss %f ' % (train_loss, valid_loss),
              INFO_THRE)

    #torch.save(model.state_dict(), 'model_ner/ner_link_round_%s.pth' % round)
    pred_vector.append(pred_set)
    round += 1
    # INFO = 'train loss %f, valid loss %f, acc %f, recall %f, f1 %f ' % (train_loss, valid_loss, INFO_THRE[0], INFO_THRE[1], INFO_THRE[2])
    # logging.info(INFO)
    # INFO = 'epoch %d, train loss %f, valid loss %f' % (epoch, train_loss, valid_loss)
    # logging.info(INFO + '\t' + INFO_THRE)
Example #26
                                                  mask_X,
                                                  label_smoothing=False)
        ner_pred = ner_pred.argmax(dim=-1)
        #ner_pred = crf.decode(ner_logits, mask=mask_X)
        for i, item in enumerate(ner_pred):
            x = ner_pred[i][0:length[i]].cpu().numpy()
            y = ner[i][0:length[i]].cpu().numpy()
            ner_pred_set.append(x)
            ner_label_set.append(y)
        intent_pred_set.append(intent_pred.cpu().numpy())
        intent_label_set.append(intent_label.cpu().numpy())
        # ner_pred_set.append(ner_pred.view(-1, ner_pred.size()[-1]).argmax(dim=-1).cpu().numpy())
        # ner_label_set.append(ner.view(-1).cpu().numpy())
        valid_loss += loss * X.size()[0]
        total_length += length.sum()
    valid_loss = valid_loss / len(dev_X)
    intent_pred_set = np.concatenate(intent_pred_set, axis=0)
    intent_label_set = np.concatenate(intent_label_set, axis=0)
    ner_pred_set = np.concatenate(ner_pred_set, axis=0)
    ner_label_set = np.concatenate(ner_label_set, axis=0)
    INFO_THRE = ner_parser(ner_pred_set, ner_label_set)
    #INFO_THRE = get_threshold(pred_set, label_set)
    INFO = 'epoch %d, train loss %f, valid loss %f' % (epoch, train_loss,
                                                       valid_loss)
    logging.info(INFO + '\t' + INFO_THRE)
    print(INFO + '\t' + INFO_THRE)
    INFO_THRE = get_threshold(intent_pred_set, intent_label_set)
    #INFO = 'epoch %d, train loss %f, valid loss %f' % (epoch, train_loss, valid_loss)
    logging.info(INFO_THRE)
    print(INFO_THRE)
Example #27
def get_level_one_item(ans, session, q, level_one_count, db):
    """
    Select the level-one items.
    :param ans:
    :param session:
    :param q: questionnaire object
    :param level_one_count: number of items to draw at level one
    :return: the first drawn question object
    """
    que = None

    # Test ID
    q_id = q.id

    # Build the list of item IDs the examinee has already answered
    can_not_in_choices_index = get_has_answered_que_id_list(ans, 1)

    # Size of the level-one item bank
    _count = q.level_one_count - len(can_not_in_choices_index)

    # No items left: deny access
    if _count < level_one_count:
        raise HTTPError(403)

    # Bank size / items to draw sets the sampling range for each item
    _slice = _count / level_one_count

    # JSON field
    # Selected items go into this dict
    selected_que_dict = {}
    # Selection order goes into this dict
    order_que = {}
    # Stage-one items to store in the session
    next_item_list = []

    choice_question_index_list = []

    for i in range(level_one_count):
        # Lower bound
        pre = _slice * i + 1
        # Upper bound
        nxt = _slice * (i + 1)
        try:
            choice_question_index = random.choice(xrange(pre, nxt))
        except IndexError:
            choice_question_index = i + 1
        choice_question_index_list.append(choice_question_index)

    if not can_not_in_choices_index:
        cursor = yield db.execute("""
                select * from (select *, row_number() over(order by threshold) row_num from question
                where questionnaire_id=%s and a_level=%s ) as temp
                where row_num in %s
                """, (q.id, 1, tuple(choice_question_index_list)))
    else:
        cursor = yield db.execute("""
                select * from (select *, row_number() over(order by threshold) row_num from question
                where questionnaire_id=%s and a_level=%s and not (id in %s) ) as temp
                where row_num in %s
                """, (q.id, 1, tuple(can_not_in_choices_index), tuple(choice_question_index_list)))

    use_question_list = cursor.fetchall()

    for i, use_question in enumerate(use_question_list):
        # Save the item parameters to the session
        session['%s_a' % q_id].append(use_question.slop)
        session['%s_b' % q_id].append(get_threshold(use_question))
        # Key for order_que
        index_key = i + 1
        order_que[index_key] = use_question.id
        selected_que_dict[use_question.id] = {'a_level': 1,
                                              'slop': use_question.slop,
                                              'threshold': get_threshold(use_question)}
        if i == 0:
            que = use_question
        else:
            next_item_list.append(use_question)

    ans.score_answer.update(selected_que_dict)
    ans.order_answer.update(order_que)
    # Store the upcoming items in the session
    session['%s_next_item' % q.id] = next_item_list
    raise gen.Return(que)
Example #28
    def __init__(self,
                 vocab_size,
                 hidden_size,
                 lr,
                 batch_size,
                 method="adadelta"):

        self.hidden_size = hidden_size
        self.threshold = utils.get_threshold(hidden_size, hidden_size)
        self.wordEmbedding = theano.shared(name="wordEmbedding",
                                           value=utils.init_randn(
                                               vocab_size, hidden_size),
                                           borrow=True)

        W = self.threshold * np.random.randn(3, hidden_size, hidden_size)
        U = self.threshold * np.random.randn(3, hidden_size, hidden_size)
        b = np.zeros((3, hidden_size))

        self.W = theano.shared(name="W",
                               value=W.astype(theano.config.floatX),
                               borrow=True)
        self.U = theano.shared(name="U",
                               value=U.astype(theano.config.floatX),
                               borrow=True)
        self.b = theano.shared(name="b",
                               value=b.astype(theano.config.floatX),
                               borrow=True)

        self.V = theano.shared(name="V",
                               value=np.zeros((hidden_size, vocab_size),
                                              dtype=theano.config.floatX),
                               borrow=True)

        self.lr = lr
        self.params = [self.wordEmbedding, self.W, self.U, self.b, self.V]

        if method == "adadelta":
            self.mean_delta_g2 = [
                theano.shared(np.zeros_like(param.get_value()))
                for param in self.params
            ]
            self.mean_delta_x2 = [
                theano.shared(np.zeros_like(param.get_value()))
                for param in self.params
            ]

        print 'ok1'
        index = T.imatrix("index")  # (max_sentence_size, batch_szie)
        x = self.wordEmbedding[
            index]  # (max_sentence_size, batch_size, hidden_size)
        mask = T.imatrix(
            "mask")  # (max_sentence_size, batch_size)  mask of input x
        y_expect = T.imatrix("y_expect")  # (max_sentence_size, batch_size)
        print 'ok2'

        h = self.forward(x)
        temp = T.dot(h, self.V)
        y, updates = theano.scan(fn=lambda item: T.nnet.softmax(item),
                                 sequences=temp)
        print 'ok6'
        print 'y: ', type(y), y.type

        loss, updates = theano.scan(fn=self.calcuCost,
                                    sequences=[y, y_expect, mask])
        print 'ok7'

        probSum = T.sum(loss)
        totalNum = T.sum(mask)
        cost = probSum / totalNum

        paramsGrads = [T.grad(cost, param) for param in self.params]
        if method is None:
            paramsUpdates = [(param, param + self.lr * g)
                             for param, g in zip(self.params, paramsGrads)]

        if method == "adadelta":
            delta_x_updates, delta_g2_updates, delta_x2_updates = self.adaDelta(
                paramsGrads, decay_rate=0.95, eps=1e-6)

            gradUpdates = [
                (param, param - delta_x)
                for param, delta_x in zip(self.params, delta_x_updates)
            ]
            g2Updates = [(oldValue, newValue) for oldValue, newValue in zip(
                self.mean_delta_g2, delta_g2_updates)]
            x2Updates = [(oldValue, newValue) for oldValue, newValue in zip(
                self.mean_delta_x2, delta_x2_updates)]

            paramsUpdates = gradUpdates + g2Updates + x2Updates

        self.train = theano.function(inputs=[index, y_expect, mask],
                                     outputs=[cost, probSum, totalNum],
                                     updates=paramsUpdates)

        self.predict = theano.function(inputs=[index, y_expect, mask],
                                       outputs=[probSum, totalNum])
Example #29
    def segment(self, lp_image):
        print("Shape image", np.shape(lp_image))
        # Define type of license plate

        if lp_image.shape[0] / lp_image.shape[1] <= 0.6:
            type_lp = 'CarLong'
        else:
            type_lp = 'Square'

        # Resize to the fixed license-plate width
        lp_image = imutils.resize(lp_image, width=self.width_fixed_size)
        imagedraw = lp_image.copy()

        #threshold image
        gray_img = cv2.cvtColor(lp_image, cv2.COLOR_BGR2GRAY)
        gray_img = maximizeContrast(gray_img)
        threshed = get_threshold(gray_img)

        #find contours in image
        cnts, _ = cv2.findContours(threshed, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)
        # Find characters in the image
        list_bbox_character = []
        if len(cnts) > 0:
            list_bbox = []
            for cnt in cnts:
                #Find bounding box
                boundRect = (boxX, boxY, boxW, boxH) = cv2.boundingRect(cnt)
                topleft = (int(boundRect[0]), int(boundRect[1]))
                bottomright = (int(boundRect[0] + boundRect[2]),
                               int(boundRect[1] + boundRect[3]))
                topright = (bottomright[0], topleft[1])
                bottomleft = (topleft[0], bottomright[1])
                fourpoint_rectangle = (topleft, topright, bottomright,
                                       bottomleft)

                # aspect ratio: bounding-box width / bounding-box height
                aspectRatio = boxW / float(boxH)
                # height ratio: bounding-box height / license-plate height
                heightRatio = boxH / float(lp_image.shape[0])
                keepAspectRatio = 0.05 < aspectRatio < 1.4
                keepHeight = 0.2 < heightRatio < 0.9
                n_pixel_white = np.sum(threshed[boxY:boxY + boxH,
                                                boxX:boxX + boxW] == 255)
                # ratio of white pixels to total pixels in the thresholded ROI
                whiteRatio = n_pixel_white / float(boxH * boxW)
                areaPolygon = boxW * boxH

                # Check whether the bounding box belongs to a character
                if keepHeight and whiteRatio <= 0.95 and\
                        0.001 <= areaPolygon / (float(lp_image.shape[0]) * float(lp_image.shape[1])) <= 0.25\
                        and keepAspectRatio  and bbox_in_boundary_image(fourpoint_rectangle, lp_image):
                    boxCharacter = np.int0(fourpoint_rectangle)
                    boxCharacter = boxCharacter.tolist()
                    list_bbox.append(boxCharacter)

            # Delete bboxes that overlap with others
            print("Shape of bbox", np.shape(list_bbox))
            list_bbox_character = list_bbox.copy()
            for indexA in range(len(list_bbox) - 1):
                for indexB in range(indexA + 1, len(list_bbox)):
                    delete_bbox_overlap(indexA, indexB, list_bbox,
                                        list_bbox_character)
        # Remove bounding boxes that are too small or oddly shaped
        list_bbox_character = remove_bbox_noise(list_bbox_character)
        list_extendbbox = []
        # Pad bounding boxes to get a larger image of each character
        for bbox in list_bbox_character:
            list_extendbbox.append(padding_rect(bbox))
        # Sort bboxes right to left, top to bottom
        print("List extend bbox: ", list_extendbbox)
        for bbox in list_extendbbox:
            cv2.rectangle(imagedraw, (bbox[0][0], bbox[0][1]),
                          (bbox[2][0], bbox[2][1]), (255, 0, 0), 2)
        cv2.imwrite('ututut.png', imagedraw)
        if len(list_extendbbox) >= 3 and len(list_extendbbox) <= 10:
            list_sorted = sort_bbox(type_lp, list_extendbbox)
            # print("Checking: ",len(list_sorted))
            return list_sorted

        elif len(list_extendbbox) < 3:
            return list_extendbbox
        else:
            return []
Example #30
def train(data_loader, net, loss, epoch, optimizer, get_lr, save_dir,
          stats_path):
    start_time = time.time()
    net.train()
    lr = get_lr(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    total_train_loss = []
    class_predict = []
    class_target = []
    for i, sample in enumerate(data_loader):
        data = sample['image']
        target_c = sample['label_c']
        target_s = sample['label_s']

        if args.model_name == "ResUNet101Index":
            batch_size = data.size(0)
            h = data.size(2)
            w = data.size(3)
            data = torch.cat(
                (data, sample['index'].float().unsqueeze(0).reshape(
                    (batch_size, 1, h, w))),
                dim=1)

        data = data.to(DEVICE)
        target_c = target_c.to(DEVICE)
        target_s = target_s.to(DEVICE)
        output_s, output_c = net(data)
        optimizer.zero_grad()
        cur_loss, _, _, c_p = loss(output_s, output_c, target_s, target_c)
        total_train_loss.append(cur_loss.item())
        class_target.append(target_c.detach().cpu().numpy())
        class_predict.append(c_p.detach().cpu().numpy())
        cur_loss.backward()
        optimizer.step()

    logging.info(
        'Epoch[%d], Batch [%d], total loss is %.6f, using %.1f s!' %
        (epoch, i, np.mean(total_train_loss), time.time() - start_time))
    total_train_class_predict = np.concatenate(class_predict, 0)
    total_train_class_target = np.concatenate(class_target, 0)
    adaptive_thresholds = get_threshold(total_train_class_predict,
                                        total_train_class_target, 0.995)
    cur_precision, _ = metric(total_train_class_predict,
                              total_train_class_target, adaptive_thresholds)
    logging.info(
        'Epoch[%d], [precision=%.4f, -->%.3f, -->%.3f, -->%.3f, -->%.3f]' %
        (epoch, np.mean(cur_precision), np.mean(cur_precision[0]),
         np.mean(cur_precision[1]), np.mean(
             cur_precision[2]), np.mean(cur_precision[3])))
    logging.info('the adaptive thresholds is [%.4f, %.4f, %.4f, %.4f]' %
                 (adaptive_thresholds[0], adaptive_thresholds[1],
                  adaptive_thresholds[2], adaptive_thresholds[3]))

    with open(stats_path, 'a') as f:
        writer = csv.writer(f)
        writer.writerow([
            epoch, i,
            np.mean(total_train_loss),
            np.mean(cur_precision),
            np.mean(cur_precision[0]),
            np.mean(cur_precision[1]),
            np.mean(cur_precision[2]),
            np.mean(cur_precision[3])
        ])

    return np.mean(total_train_loss), adaptive_thresholds