Example #1

import numpy as np

# `h` (helper utilities), `kalman`, `delpath`, `clippath`, and `savepath`
# are assumed to be defined elsewhere in the project.
def main():

    # Reset and (re)create the output directory via project helpers
    delpath()
    h.makedirsx("./gen/kalman/")

    # Columns 2..5 hold the box coordinates (xmin, ymin, xmax, ymax)
    bbox = h.readmaarray(clippath + "/detections/clean")[:, 2:6]
    gt = h.readmaarray(clippath + "/groundtruth/gt")[:, 2:6]
    end = h.lastindex(gt)

    # Replace the loaded detections with synthetic ones drawn around the ground truth
    #mu = [-16, 3, 12, 1]
    mu = [0, 0, 0, 0]
    std = [17, 15, 18, 17]

    bbox = h.createfakedet(gt, mu, std)

    #for i in range(0, bbox.shape[0], 1):
    #    bbox.mask[i,:] = True

    # NOTE: mixing strings and ints makes NumPy coerce the whole array to strings
    modelinfo = np.array([["low", 1], ["medium", 10], ["large", 100]])

    for i in range(modelinfo.shape[0]):

        # Cast Q back to a number (modelinfo stores it as a string, see above)
        mem, memk, memp = kalman(bbox, gt, std, Q=float(modelinfo[i, 1]))
        
        ylabel = ["xmin [px]", "ymin [px]", "xmax [px]", "ymax [px]"]
        h.timeplot3(gt, bbox, mem, modelinfo[i, 0] + "-results",
                    [[0, 1920], [1080, 0], [0, 400], [0, 2]],
                    ylabel, ["Frame [k]"] * 4,
                    savepath + "ot" + modelinfo[i, 0])
        h.plotk(memk, savepath + modelinfo[i, 0])
        h.plotp(memp, savepath + modelinfo[i, 0])

        memioubefore = h.iouclip(bbox, gt)
        memiouafter = h.iouclip(mem, gt)
        print("model:", modelinfo[i, 0])
        print("mean IoU before filtering:", np.mean(memioubefore))
        print("mean IoU after filtering:", np.mean(memiouafter))
        print("--------")
        print("RMSE before filtering:", h.rmse(gt, bbox))
        print("RMSE after filtering:", h.rmse(gt, mem))
        print("mean Kalman gain:", np.mean(memk, axis=0))
    # Leftover classifier-evaluation snippet (Python 2 print syntax in the
    # original). rf_preds, y_test, helper, plt, rf, roc_curve, and
    # roc_auc_score are not defined in this function, so the whole block is
    # kept commented out, updated to Python 3 syntax.
    # print("proportion of 0:", sum(y_test == 0) / len(y_test))
    # print("proportion of 1:", sum(y_test == 1) / len(y_test))
    # print("proportion of 2:", sum(y_test == 2) / len(y_test))
    # print("proportion of 3:", sum(y_test == 3) / len(y_test))

    # print(sum(rf_preds == y_test) / len(y_test))

    # print("class 0 accuracy:", sum(rf_preds[y_test == 0] == 0) / len(rf_preds[y_test == 0]))
    # print("class 1 accuracy:", sum(rf_preds[y_test == 1] == 1) / len(rf_preds[y_test == 1]))
    # print("class 2 accuracy:", sum(rf_preds[y_test == 2] == 2) / len(rf_preds[y_test == 2]))
    # print("class 3 accuracy:", sum(rf_preds[y_test == 3] == 3) / len(rf_preds[y_test == 3]))

    # print(helper.corr(rf_preds, y_test))
    # print(helper.rmse(rf_preds, y_test))

    # plt.scatter(rf_preds, y_test)
    # plt.show()

    # y_pred_rf = rf.predict_proba(X_test)[:, 1]
    # fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
    # print(roc_auc_score(y_test, y_pred_rf))

    # plt.figure(1)
    # plt.plot([0, 1], [0, 1], 'k--')
    # plt.plot(fpr_rf, tpr_rf, label='RF')
    # plt.xlabel('False positive rate')
    # plt.ylabel('True positive rate')
    # plt.title('ROC curve')
    # plt.legend(loc='best')
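
The kalman() call above relies on project code that is not shown. For orientation only, here is a minimal sketch of the kind of per-coordinate filter such a helper might wrap; the constant-position model, the name kalman_1d, and every parameter are assumptions, not the project's actual implementation.

import numpy as np

def kalman_1d(z, q, r):
    """Minimal 1-D constant-position Kalman filter (illustrative sketch).

    z : noisy measurements, q : process-noise variance, r : measurement-noise variance
    """
    x, p = float(z[0]), 1.0        # initial state estimate and covariance
    out = np.empty(len(z))
    for k, zk in enumerate(z):
        p = p + q                  # predict: covariance grows by process noise
        kgain = p / (p + r)        # Kalman gain
        x = x + kgain * (zk - x)   # update with the measurement residual
        p = (1.0 - kgain) * p      # posterior covariance
        out[k] = x
    return out

Each bounding-box coordinate could then be smoothed independently, e.g. np.column_stack([kalman_1d(bbox[:, j], q=10.0, r=std[j] ** 2) for j in range(4)]).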
Example #3

import mxnet as mx

# `hlp` (metric helpers such as rmse/ncc) is assumed to be a project-local module.
def conv_net_regressor(shape, use_additional_pool=False, bn_mom=0.9):
    # We have 2 data sources and concatenate them
    data_fixed = mx.sym.Variable(name='data_fixed')
    data_moving = mx.sym.Variable(name='data_moving')
    concat_data = mx.sym.concat(*[data_fixed, data_moving])
    batched = mx.sym.BatchNorm(data=concat_data,
                               fix_gamma=True,
                               eps=2e-5,
                               momentum=bn_mom,
                               name='bn_data')
    # The number of kernels per layer can be of arbitrary size, but the number of kernels of the output layer is
    # determined by the dimensionality of the input images
    filter_list = [16, 32, 64, 128]
    # four alternating layers of 3 × 3 convolutions with 0-padding and 2 × 2 downsampling layers
    for i in range(4):
        # The first layer reads the normalized input; later layers chain on `body`
        conv_input = batched if i == 0 else body
        body = mx.sym.Convolution(data=conv_input,
                                  num_filter=filter_list[i],
                                  kernel=(3, 3),
                                  stride=(1, 1),
                                  pad=(0, 0),
                                  no_bias=True,
                                  name="conv" + str(i))
        body = mx.sym.BatchNorm(data=body,
                                fix_gamma=False,
                                eps=2e-5,
                                momentum=bn_mom,
                                name='bn' + str(i))
        # The original authors use exponential linear units; MXNet exposes
        # ELU through LeakyReLU with act_type='elu'
        body = mx.sym.LeakyReLU(data=body,
                                act_type='elu',
                                name='relu' + str(i))
        # NOTE: with stride (1, 1) this pooling does not actually downsample;
        # the 2 × 2 downsampling described above would need stride (2, 2)
        body = mx.sym.Pooling(data=body,
                              kernel=(2, 2),
                              stride=(1, 1),
                              pad=(1, 1),
                              pool_type='avg')
    # 1 × 1 convolutional layers make the ConvNet regressor fully convolutional
    # (the reference describes three of them; this implementation applies two)
    for k in range(2):
        i = k + 4
        body = mx.sym.Convolution(data=body,
                                  num_filter=256,
                                  kernel=(1, 1),
                                  stride=(1, 1),
                                  pad=(0, 0),
                                  no_bias=True,
                                  name="conv" + str(i))
        body = mx.sym.BatchNorm(data=body,
                                fix_gamma=False,
                                eps=2e-5,
                                momentum=bn_mom,
                                name='bn' + str(i))
        # body = mx.sym.Activation(data=body, act_type='relu', name='relu' + str(i))
        body = mx.sym.LeakyReLU(data=body,
                                act_type='elu',
                                name='relu' + str(i))
        if use_additional_pool:
            body = mx.sym.Pooling(data=body,
                                  kernel=(2, 2),
                                  stride=(2, 2),
                                  pad=(1, 1),
                                  pool_type='avg')
    #  body = mx.sym.Pooling(data=body, kernel=(2, 2), stride=(2, 2), pad=(1, 1), pool_type='avg')
    flatten = mx.sym.flatten(data=body)
    fc3 = mx.sym.FullyConnected(data=flatten, num_hidden=6)
    fc3 = mx.sym.Activation(data=fc3, act_type='tanh', name='tanh_after_fc')
    # The Spatial Transformer applies an affine transformation to the moving image,
    # parametrized by the output of the body network
    stnet = mx.sym.SpatialTransformer(data=data_moving,
                                      loc=fc3,
                                      target_shape=(shape[2], shape[3]),
                                      transform_type='affine',
                                      sampler_type="bilinear",
                                      name='SpatialTransformer')
    #cor = mx.sym.Correlation(data1=stnet, data2=data_fixed, kernel_size=28, stride1=2, stride2=2, pad_size=0, max_displacement=0)
    #cor2 = mx.sym.Correlation(data1=data_fixed, data2=stnet, kernel_size=28, stride1=1, stride2=1, max_displacement=0)
    # loss = mx.sym.MakeLoss(hlp.ncc(stnet, data_fixed), normalization='batch')
    loss = mx.sym.MakeLoss(hlp.rmse(stnet, data_fixed), normalization='batch')
    # NOTE: fc3 appears twice in the output group; the duplicate is kept in
    # case downstream code indexes outputs by position
    output = mx.sym.Group([
        mx.sym.BlockGrad(fc3),
        mx.sym.BlockGrad(stnet),
        mx.sym.BlockGrad(fc3),
        loss,
    ])
    return output
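
How this symbol gets bound and trained is not shown. Below is a minimal, hypothetical sketch using MXNet's Module API; the batch geometry, context, initializer, and optimizer settings are all assumptions:

import mxnet as mx

# Assumed input geometry: batches of single-channel 128 × 128 image pairs
shape = (8, 1, 128, 128)
sym = conv_net_regressor(shape)

mod = mx.mod.Module(symbol=sym,
                    data_names=('data_fixed', 'data_moving'),
                    label_names=None,
                    context=mx.cpu())
mod.bind(data_shapes=[('data_fixed', shape), ('data_moving', shape)],
         for_training=True)
mod.init_params(initializer=mx.init.Xavier())
mod.init_optimizer(optimizer='adam', optimizer_params={'learning_rate': 1e-4})

With label_names=None, the MakeLoss output drives the gradient during backward passes, while the BlockGrad outputs expose fc3 and the warped image for inspection without contributing gradients.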
Example #4

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error

# X_train, y_train, X_test, y_test, and rmse are assumed to be defined earlier
# (e.g. in preceding notebook cells).
rf = RandomForestClassifier(max_depth=20,
                            random_state=0,
                            n_estimators=2000,
                            oob_score=True,
                            n_jobs=-1,
                            verbose=0,
                            max_features='auto')

rf_model = rf.fit(X_train, y_train)

rf_pred = rf.predict(X_test)

rf_score = rf.score(X_test, y_test)

# Bare expressions only display inside a notebook; print them so a plain script shows them too
print(rmse(rf_pred, y_test), rf_model.oob_score_, rf_score)

print(mean_squared_error(y_test, rf_pred))
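
Both this example and Example #5 call an rmse that is not defined in the snippet. A minimal NumPy sketch of what such a helper typically looks like follows; the exact signature is an assumption:

import numpy as np

def rmse(predict, target):
    """Root-mean-square error between two equal-length arrays (assumed signature)."""
    predict = np.asarray(predict, dtype=float)
    target = np.asarray(target, dtype=float)
    return np.sqrt(np.mean((predict - target) ** 2))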

#xgbMatrix_train = xgb.DMatrix(data=X_train, label=y_train)
#
#xgbMatrix_test = xgb.DMatrix(data=X_test)
#
## 'eta' and 'learning_rate' are aliases; the original set both (0.5 and 0.02)
#params = {'max_depth': 2, 'eta': 0.5, 'objective': 'reg:squarederror',
#          'nthread': 4, 'colsample_bytree': 0.7, 'subsample': 0.5,
#          'scale_pos_weight': 1, 'gamma': 5, 'learning_rate': 0.02}
#
## num_boost_round belongs to xgb.train, not to params; 'silent' was dropped
## (removed in XGBoost 1.0 in favor of 'verbosity')
#xgb_model = xgb.train(params, xgbMatrix_train, num_boost_round=100)
Example #5

# Method excerpt from a model class; `rmse` is assumed to be available in that module
def compute_error(self, predict, target):
    return rmse(predict, target)
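
For context, a minimal hypothetical host class for this method (the class name and the usage line are assumptions, not from the source):

class Evaluator:
    """Hypothetical host class for the excerpt above."""

    def compute_error(self, predict, target):
        return rmse(predict, target)

# e.g.: Evaluator().compute_error(rf_pred, y_test)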