Code example #1
0
    def forward(self, x, cuave0, cuave1, cuave2, cuave3):
        """Forward pass: extract shallow features, run the PRB chain while
        injecting side-information features after blocks 1, 3, 5 and 7,
        then fuse all block outputs and reconstruct the output.
        """
        # Side-information branches, keyed by the block index after which
        # each one is added to the running feature map.
        side_info = {
            1: self.SI0(cuave0),
            3: self.SI1(cuave1),
            5: self.SI2(cuave2),
            7: self.SI3(cuave3),
        }

        shallow = self.conv1(x)
        feats = [self.conv2(shallow)]
        for idx in range(self.nPRB):
            block_out = self.PRBs[idx](feats[idx])
            if idx in side_info:
                block_out = block_out + side_info[idx]
            feats.append(block_out)

        # Concatenate every PRB output (skipping the pre-chain features)
        # along the channel dimension, then fuse globally.
        fused = torch.cat(feats[1:], 1)
        fused = self.GFF_3x3(self.GFF_1x1(fused))
        # Global residual connection back to the shallow features.
        residual = fused + shallow
        return self.conv3(self.conv_up(residual))
Code example #2
0
File: function.py  Project: Musclemanisme/HRNet
def myTestval(config,
              test_dataset,
              testloader,
              model,
              sv_dir='',
              sv_pred=False):
    """Run multi-scale inference over *testloader* and return mean
    DAVIS-style metrics.

    Args:
        config: experiment config node (DATASET/TEST/MODEL sections used).
        test_dataset: dataset object providing ``multi_scale_inference`` and
            ``save_pred``.
        testloader: iterable of (image, label, _, name, *border_padding).
        model: network to evaluate (switched to eval mode here).
        sv_dir: root directory for saved predictions.
        sv_pred: when True, save predictions under ``sv_dir/test_results``.

    Returns:
        (F, J): mean boundary F-measure and mean region IoU over the loader.

    NOTE(review): the original code accumulated scores in local lists named
    ``J`` and ``F``; the local ``F`` shadowed the module-level
    ``torch.nn.functional as F`` import, so the ``F.interpolate`` call below
    raised AttributeError whenever the prediction needed resizing.  The
    accumulators are renamed to fix that shadowing bug.  (An unused
    ``confusion_matrix`` array was also dropped.)
    """
    j_scores = []
    f_scores = []

    model.eval()
    with torch.no_grad():
        for batch in tqdm(testloader):
            image, label, _, name, *border_padding = batch
            size = label.size()
            pred = test_dataset.multi_scale_inference(
                config,
                model,
                image,
                scales=config.TEST.SCALE_LIST,
                flip=config.TEST.FLIP_TEST)

            # Crop away any border padding added by the dataloader.
            if len(border_padding) > 0:
                border_padding = border_padding[0]
                pred = pred[:, :, 0:pred.size(2) - border_padding[0],
                            0:pred.size(3) - border_padding[1]]

            # Resize logits back to label resolution when they differ.
            if pred.size()[-2] != size[-2] or pred.size()[-1] != size[-1]:
                pred = F.interpolate(pred,
                                     size[-2:],
                                     mode='bilinear',
                                     align_corners=config.MODEL.ALIGN_CORNERS)

            output = pred.cpu().numpy().transpose(0, 2, 3, 1)
            seg_pred = np.asarray(np.argmax(output, axis=3),
                                  dtype=np.uint8).squeeze()
            # np.int was removed in NumPy 1.24; use an explicit dtype.
            seg_gt = np.asarray(label.cpu().numpy()[:, :size[-2], :size[-1]],
                                dtype=np.int64).squeeze()
            j_scores.append(db_eval_iou(seg_gt, seg_pred))
            f_scores.append(db_eval_boundary(seg_pred, seg_gt))

            if sv_pred:
                sv_path = os.path.join(sv_dir, 'test_results')
                # makedirs with exist_ok avoids the exists/mkdir race and
                # also creates missing parent directories.
                os.makedirs(sv_path, exist_ok=True)
                test_dataset.save_pred(pred, sv_path, name)

    return sum(f_scores) / len(f_scores), sum(j_scores) / len(j_scores)
Code example #3
0
def HTF_Energy(m):  ###################    f1
    """Return the energy feature (f1): the sum of squared values in *m*.

    Args:
        m: iterable of iterables of numbers (e.g. a 2-D coefficient matrix).

    Returns:
        Sum of the squares of every element; 0 for empty input.
    """
    # Replaces the original append loop; the unused ``lists`` local and the
    # dead ``global showFeatureValue`` statement were removed.
    return sum(value ** 2 for row in m for value in row)
Code example #4
0
 def _buildFL(self, paths):
     F = []
     L = []
     for c in range(self.n_class):
         f = []
         l = []
         for t in range(len(paths) // self.n_class):
             for path in paths[c + (t * self.n_class)]:
                 f.append(path[-1]['leaf'])
                 l.append(path[-1]['nodeid'])
         F.append(np.array(f))
         L.append(np.array(l))
     return F, L
Code example #5
0
    def forward(self, x, cuave0, cuave1, cuave2, cuave3):
        """Forward pass: shallow feature extraction, an RDB chain with
        side-information injected after blocks 1, 3, 5 and 7, global
        feature fusion, then upsampling and output reconstruction.
        """
        # Side-information branches, keyed by the block index after which
        # each is added to the running feature map.
        side_info = {
            1: self.SI0(cuave0),
            3: self.SI1(cuave1),
            5: self.SI2(cuave2),
            7: self.SI3(cuave3),
        }

        shallow = self.conv1(x)
        feats = [self.conv2(shallow)]
        for idx in range(self.nRDB):
            block_out = self.RDBs[idx](feats[idx])
            if idx in side_info:
                block_out = block_out + side_info[idx]
            feats.append(block_out)

        # Concatenate every RDB output (skipping the pre-chain features)
        # along the channel dimension and fuse globally.
        fused = torch.cat(feats[1:], 1)
        fused = self.GFF_3x3(self.GFF_1x1(fused))
        # Global residual connection, then upsample and reconstruct.
        residual = fused + shallow
        upsampled = self.upsample(self.conv_up(residual))
        return self.conv3(upsampled)
Code example #6
0
 def forward(self, x):
     """Backbone forward pass: stem (conv/bn/relu/maxpool) followed by the
     four residual stages.

     Returns:
         (x, f): the final stage output and the list of all four stage
         outputs.  Their sizes are bs 256 w/4 h/4, bs 512 w/8 h/8,
         bs 1024 w/16 h/16 and bs 2048 w/32 h/32 respectively.
     """
     out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
     stage_outputs = []
     # Run each residual stage in turn, keeping every intermediate map.
     for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
         out = stage(out)
         stage_outputs.append(out)
     return out, stage_outputs
Code example #7
0
    def forward(self, x1,x2):
        """Two-stream forward pass over the shared ``self.base`` backbone.

        Stream 1 (``x1``) produces fused features ``F``, edge logits ``E``
        and saliency logits ``S``; stream 2 (``x2``) only contributes edge
        features, which are upsampled and fused into a final edge map.

        Returns:
            (F, edges, E, S): lists of tensors; every entry of ``edges``,
            ``E`` and ``S`` has been passed through ``nn.Sigmoid``.
        """
        #print(self.e_feat)
        edges,F,E_1,S_2,xx,E,S=[],[],[],[],[],[],[]
        num = 0
        # ---- stream 1: run x1 through the backbone, tapping the layers
        # listed in self.e_extract (raw features) and self.extract (heads).
        for k in range(len(self.base)):
            #print(k)

            x1 = self.base[k](x1)

            if k in self.e_extract:
                xx.append(x1)
            #edges.append()
            #print(k,x.size())
            if k in self.extract:
                # The first two stages fuse two tapped maps, later stages
                # fuse three.  NOTE(review): the index arithmetic
                # (2*num / num*3-2..num*3) encodes how many taps each stage
                # owns — confirm it matches self.e_extract's layout.
                if num<2:

                    edge =self.e_feat[num](xx[2*num],xx[2*num+1])

                else:
                    edge = self.e_feat[num](xx[num*3-2],xx[num*3-1],xx[num*3])


                #edges.append(edge)


                # Stage head returns (fused feature, edge logits,
                # saliency logits).
                (f,e_1,s_2)= self.feat[num](x1,edge)
                F.append(f)
                E_1.append(e_1)
                #E_2.append(e_2)
                #S_1.append(s_1)
                S_2.append(s_2)




                num += 1
        # side output
        #print(len(y3))


        # Deepest stage: pooled features go through the last head, which
        # returns only (feature, saliency) — no edge branch.
        a,b=self.feat[num](self.pool(x1))
        F.append(a)
        S_2.append(b)

        # ---- stream 2: reuse the backbone on x2, collecting only the
        # per-stage edge features this time.
        del xx
        xx =[]
        num=0
        for k in range(len(self.base)):
            #print(k)

            x2 = self.base[k](x2)

            if k in self.e_extract:
                xx.append(x2)
            #print(k,x.size())
            if k in self.extract:
                if num<2:

                    edge =self.e_feat[num](xx[2*num],xx[2*num+1])

                else:
                    edge = self.e_feat[num](xx[num*3-2],xx[num*3-1],xx[num*3])


                edges.append(edge)
                num+=1


        # Upsample the five per-stage outputs to input resolution and squash
        # the edge/saliency logits with sigmoid.
        for i in range(5):
            edges[i] = self.up[i](edges[i])

            E.append(self.up1[i](E_1[i]))
            E[i] = nn.Sigmoid()(E[i])

            S.append(self.up2[i](S_2[i]))
            S[i] = nn.Sigmoid()(S[i])

        # Sixth saliency map comes from the pooled deepest stage.
        S.append(self.up2[5](S_2[5]))
        S[5] = nn.Sigmoid()(S[5])

        del S_2,E_1




        # Fuse the five upsampled edge maps into a sixth, then squash all.
        e_f = torch.cat([edges[0], edges[1], edges[2], edges[3], edges[4]], 1)
        edges.append(self.fuse(e_f))

        for i in range(6):
            edges[i]=nn.Sigmoid()(edges[i])






        return (F,edges,E,S)
Code example #8
0
File: Sets2Sets.py  Project: zhenql/Sets2Sets
def evaluate(data_chunk, encoder, decoder, output_size, test_key_set,
             next_k_step, activate_codes_num):
    """Evaluate next-basket prediction over *test_key_set*.

    For every test user, decodes the next ``next_k_step`` baskets and
    accumulates precision / recall / F-score (overall and per step),
    NDCG, and the all-steps hit rate, then prints summary statistics.

    Returns:
        None — results are only printed.

    NOTE(review): relies on module-level globals ``past_chunk`` and
    ``future_chunk`` to index into ``data_chunk``; confirm they are set
    before calling.
    """
    # Overall and per-step (1st/2nd/3rd) metric accumulators.
    prec, rec, F = [], [], []
    prec1, rec1, F1 = [], [], []
    prec2, rec2, F2 = [], [], []
    prec3, rec3, F3 = [], [], []
    # Total number of target items per step (accumulated but not reported).
    length = np.zeros(3)

    NDCG = []
    n_hit = 0   # users for whom every decoded step was a hit
    count = 0   # users actually evaluated

    # Loop index renamed from ``iter`` to avoid shadowing the builtin.
    for i in range(len(test_key_set)):
        input_variable = data_chunk[past_chunk][test_key_set[i]]
        target_variable = data_chunk[future_chunk][test_key_set[i]]

        # Skip users without enough future baskets to score every step.
        if len(target_variable) < 2 + next_k_step:
            continue
        count += 1
        output_vectors, prob_vectors = decoding_next_k_step(
            encoder, decoder, input_variable, target_variable, output_size,
            next_k_step, activate_codes_num)

        hit = 0
        for idx in range(len(output_vectors)):
            # Multi-hot encode the ground-truth basket for this step.
            vectorized_target = np.zeros(output_size)
            for ii in target_variable[1 + idx]:
                vectorized_target[ii] = 1

            # Multi-hot encode the predicted basket.
            vectorized_output = np.zeros(output_size)
            for ii in output_vectors[idx]:
                vectorized_output[ii] = 1

            precision, recall, Fscore, correct = get_precision_recall_Fscore(
                vectorized_target, vectorized_output)
            prec.append(precision)
            rec.append(recall)
            F.append(Fscore)
            if idx == 0:
                prec1.append(precision)
                rec1.append(recall)
                F1.append(Fscore)
            elif idx == 1:
                prec2.append(precision)
                rec2.append(recall)
                F2.append(Fscore)
            elif idx == 2:
                prec3.append(precision)
                rec3.append(recall)
                F3.append(Fscore)
            length[idx] += np.sum(target_variable[1 + idx])
            target_topi = prob_vectors[idx]
            hit += get_HT(vectorized_target, target_topi, activate_codes_num)
            ndcg = get_NDCG(vectorized_target, target_topi, activate_codes_num)
            NDCG.append(ndcg)
        # A user counts as a full hit only if every decoded step hit.
        if hit == next_k_step:
            n_hit += 1

    print('average recall' + ': ' + str(np.mean(rec)) + ' with std: ' +
          str(np.std(rec)))
    print('average NDCG: ' + str(np.mean(NDCG)))
    print('average hit rate: ' + str(n_hit / len(test_key_set)))
Code example #9
0
    c = char_x + c_pad
    ma = ma + mapad
    return x, y, c, f, mask, ma


# Build per-line training arrays from the cleaned dataset.
X, Y, C, F, M, MA = [], [], [], [], [], []
for line in clean_data:
    x, y, c, f, m, ma = prepare_line_data(line, max_seq_len + 2,
                                          max_char_length)
    X.append(x)
    Y.append(y)
    C.append(c)
    M.append(m)
    MA.append(ma)
    F.append(f)
X = np.array(X)
Y = np.array(Y)
M = np.array(M)
MA = np.stack(MA)
# Add a trailing singleton axis to MA.
MA = np.expand_dims(MA, 2)
print(MA.shape)
F = np.array(F)
C = np.array(C)
# Truncate to a multiple of k_fold so folds divide evenly, then shuffle.
even_len = len(X) - len(X) % k_fold
indexes = np.arange(even_len)
np.random.shuffle(indexes)
X = X[indexes]
Y = Y[indexes]
M = M[indexes]
C = C[indexes]
# NOTE(review): F and MA are NOT reindexed with ``indexes`` here, so they
# fall out of alignment with X/Y/M/C after the shuffle — confirm whether
# they are re-shuffled later in the file or this is a bug.