Example #1
    def __init__(self, filepaths, imageSize):

        self.__trainData = []
        self.__trainLabel = []
        self.__testData = []
        self.__testLabel = []

        # Merge the train/test split of each dataset path into the flat lists above
        for filepath in filepaths:
            ds = datasetVOC(filepath, imageSize)
            train, test = ds.getDataset()
            trainData, trainLabel = train._datasets
            testData, testLabel = test._datasets
            self.__trainData.extend(trainData)
            self.__trainLabel.extend(trainLabel)
            self.__testData.extend(testData)
            self.__testLabel.extend(testLabel)
Example #2
def run(datasetPath, resultPath, modelPath="", resumePath=""):
    # set dataset
    if isinstance(datasetPath, str):
        ds = datasetVOC(datasetPath, 32)
    elif isinstance(datasetPath, list):
        ds = datasetVOCs(datasetPath, 32)
    else:
        raise Exception("データセットパスの型が不正です。")
    train, test = ds.getDataset()

    # set model
    model = chainer.links.Classifier(Alex())
    if os.path.isfile(modelPath):
        chainer.serializers.load_npz(modelPath, model)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # set evaluation model
    eval_model = model.copy()
    eval_model.train = False

    # train and test
    train_iter = chainer.iterators.SerialIterator(train, BATCH_SIZE)
    test_iter = chainer.iterators.SerialIterator(test, BATCH_SIZE, repeat=False, shuffle=False)
    updater = chainer.training.StandardUpdater(train_iter, optimizer, device=-1)
    trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"), out=resultPath)
    trainer.extend(extensions.Evaluator(test_iter, eval_model, device=-1))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport(
            ["epoch", "main/loss", "validation/main/loss", "main/accuracy", "validation/main/accuracy"]
        )
    )
    trainer.extend(extensions.ProgressBar(update_interval=5))
    trainer.extend(extensions.snapshot(filename="snapshot_epoch_{.updater.epoch}"))
    trainer.extend(extensions.snapshot_object(model, filename="model_epoch_{.updater.epoch}"))
    trainer.extend(extensions.dump_graph("main/loss"))
    if os.path.isfile(resumePath):
        chainer.serializers.load_npz(resumePath, trainer)
    trainer.run()
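A minimal invocation sketch, assuming the module-level constants BATCH_SIZE and EPOCH are defined and that the names used above (os, chainer, extensions, Alex, datasetVOC, datasetVOCs) are in scope; all paths below are placeholders:

# Train from scratch on a single VOC-style dataset.
run("./VOCdevkit/VOC2012", "./result")

# Merge several datasets, start from earlier weights, and resume the trainer state.
run(
    ["./VOC_siteA", "./VOC_siteB"],
    "./result_merged",
    modelPath="./result/model_epoch_10",        # optional pre-trained weights (placeholder)
    resumePath="./result/snapshot_epoch_10",    # optional trainer snapshot (placeholder)
)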
Example #3
def run(PATH):
    # set PATH
    Common.mkDir(PATH['RESULT'])

    # read model
    model = chainer.links.Classifier(Alex())
    model.train = False
    if not os.path.isfile(PATH['MODEL']):
        raise Exception("モデルが読み込めませんでした")
    chainer.serializers.load_npz(PATH['MODEL'], model)

    # set test dataset
    ds = datasetVOC(PATH['DATASET'], 32)
    tests, rawImages, GTbbBndBoxs, names = ds.getTestDataFromBING(PATH['DATASET-BING'], ".csv", 20000)

    # output summary
    CARNUM = []
    IDENTIFICATION_CARNUM = []
    PPNUM = []
    PNNUM = []
    NPNUM = []
    NNNUM = []

    # test
    for i, name in enumerate(names):

        # output detail
        PP = []             # True when a vehicle sample is classified as a vehicle
        PN = []             # True when a background sample is classified as a vehicle
        NP = []             # True when a vehicle sample is classified as background
        NN = []             # True when a background sample is classified as background
        XMIN = []           # minimum X of the rectangle
        YMIN = []           # minimum Y of the rectangle
        XMAX = []           # maximum X of the rectangle
        YMAX = []           # maximum Y of the rectangle
        GROUNDTRUTH = []    # ground-truth label
        PREDICTION = []     # label predicted by the model

        images, labels, points = tests[i]._datasets
        rawImage = rawImages[i]
        bndboxs = GTbbBndBoxs[i]
        predictions = testModel(model, images, labels)
        identificationCar = getIdentificationBndboxFromProposal(bndboxs, points)

        for j in range(0, len(predictions)):
            PP.append((predictions[j] == 1 and labels[j] == 1))
            PN.append((predictions[j] == 1 and labels[j] == 0))
            NP.append((predictions[j] == 0 and labels[j] == 1))
            NN.append((predictions[j] == 0 and labels[j] == 0))
            XMIN.append(points[j][0])
            YMIN.append(points[j][1])
            XMAX.append(points[j][2])
            YMAX.append(points[j][3])
            GROUNDTRUTH.append(labels[j])
            PREDICTION.append(predictions[j])

        # output summary
        CARNUM.append(len(bndboxs))
        IDENTIFICATION_CARNUM.append(len(identificationCar))
        PPNUM.append(PP.count(True))
        PNNUM.append(PN.count(True))
        NPNUM.append(NP.count(True))
        NNNUM.append(NN.count(True))

        Common.writeCSV(PATH["RESULT"] + "/" + name + ".csv",
                        xmin=XMIN, ymin=YMIN, xmax=XMAX, ymax=YMAX, 
                        groundtruth=GROUNDTRUTH, prediction=PREDICTION)

    Common.writeCSV(PATH["RESULT"] + "/summary.csv",
                    name=names, carnum=CARNUM, pp=PPNUM, pn=PNNUM, np=NPNUM, nn=NNNUM,
                    identificationcarnum=IDENTIFICATION_CARNUM)
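The PP/PN/NP/NN lists behave as per-image TP/FP/FN/TN counts. A small illustrative helper, not part of the original module (the name summarizeConfusion is hypothetical), showing how per-image precision and recall could be derived from the summary lists built above:

def summarizeConfusion(names, PPNUM, PNNUM, NPNUM):
    """Illustrative helper: print per-image precision/recall from the counts collected in run()."""
    for name, tp, fp, fn in zip(names, PPNUM, PNNUM, NPNUM):
        precision = float(tp) / (tp + fp) if (tp + fp) else 0.0
        recall = float(tp) / (tp + fn) if (tp + fn) else 0.0
        print("{}: precision={:.3f}, recall={:.3f}".format(name, precision, recall))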
Example #4
def run(PATH):
    logging.debug("function start")

    # set PATH
    Common.mkDir(PATH['RESULT'])

    # read model
    model = chainer.links.Classifier(Alex())
    model.train = False
    if not os.path.isfile(PATH['MODEL']):
        raise Exception("モデルが読み込めませんでした")
    chainer.serializers.load_npz(PATH['MODEL'], model)

    # output summary
    CARNUM = []
    IDENTIFICATION_CARNUM = []
    TPnum = []
    FPnum = []
    FNnum = []
    TNnum = []
    TP_15num = []
    FP_15num = []
    TP_20num = []
    FP_20num = []
    TP_25num = []
    FP_25num = []
    TP_30num = []
    FP_30num = []

    # set test dataset
    bs = bingTestset(datasetVOC(PATH['DATASET'], 32, False), PATH['BING'])
    names = bs.getTestIdList()
    for name in names:
        logging.debug("start: " + name)

        # read data
        images, labels, points, rawImage, bndboxs = bs.getTestDataByIndex(name)
        
        # test
        predictions = testModel(model, images, labels)

        # output
        TP = []     # coordinates where a vehicle sample was classified as a vehicle
        FP = []     # coordinates where a background sample was classified as a vehicle
        FN = []     # coordinates where a vehicle sample was classified as background
        TN = []     # coordinates where a background sample was classified as background

        for j in range(0, len(predictions)):
            if predictions[j] == 1 and labels[j] == 1:
                TP.append(points[j])
            elif predictions[j] == 1 and labels[j] == 0:
                FP.append(points[j])
            elif predictions[j] == 0 and labels[j] == 1:
                FN.append(points[j])
            elif predictions[j] == 0 and labels[j] == 0:
                TN.append(points[j])

        # Computed from the TP results and the ground-truth bounding boxes.
        identificationCar = Common.getIdentificationBndboxFromProposal(bndboxs, TP)

        # Remove overlapping rectangles from TP and FP at several overlap thresholds.
        TP_015 = Common.removeOverlapRect(TP, 0.15)
        TP_020 = Common.removeOverlapRect(TP, 0.20)
        TP_025 = Common.removeOverlapRect(TP, 0.25)
        TP_030 = Common.removeOverlapRect(TP, 0.30)
        FP_015 = Common.removeOverlapRect(FP, 0.15)
        FP_020 = Common.removeOverlapRect(FP, 0.20)
        FP_025 = Common.removeOverlapRect(FP, 0.25)
        FP_030 = Common.removeOverlapRect(FP, 0.30)

        # output summary
        CARNUM.append(len(bndboxs))
        IDENTIFICATION_CARNUM.append(len(identificationCar))
        TPnum.append(len(TP))
        FPnum.append(len(FP))
        FNnum.append(len(FN))
        TNnum.append(len(TN))
        TP_15num.append(len(TP_015))
        FP_15num.append(len(FP_015))
        TP_20num.append(len(TP_020))
        FP_20num.append(len(FP_020))
        TP_25num.append(len(TP_025))
        FP_25num.append(len(FP_025))
        TP_30num.append(len(TP_030))
        FP_30num.append(len(FP_030))
        
        # write file
        for tag, rects in (("TP", TP), ("FP", FP), ("FN", FN), ("TN", TN)):
            if len(rects):
                arr = np.array(rects).T
                Common.writeCSV(PATH["RESULT"] + "/" + name + "_" + tag + ".csv",
                                xmin=arr[0], ymin=arr[1], xmax=arr[2], ymax=arr[3])

        for tag, rects in (("TP(0.15)", TP_015), ("FP(0.15)", FP_015),
                           ("TP(0.20)", TP_020), ("FP(0.20)", FP_020),
                           ("TP(0.25)", TP_025), ("FP(0.25)", FP_025),
                           ("TP(0.30)", TP_030), ("FP(0.30)", FP_030)):
            if len(rects):
                arr = np.array(rects).T
                Common.writeCSV(PATH["RESULT"] + "/" + name + "_" + tag + ".csv",
                                xmin=arr[0], ymin=arr[1], xmax=arr[2], ymax=arr[3])

        random.shuffle(TP)
        random.shuffle(FP)
        random.shuffle(FN)
        random.shuffle(TN)

        Common.outputImage(PATH["RESULT"] + "/" + name + "_TP.jpg", rawImage, TP[0:min(2000, len(TP))])
        Common.outputImage(PATH["RESULT"] + "/" + name + "_FP.jpg", rawImage, FP[0:min(2000, len(FP))])
        Common.outputImage(PATH["RESULT"] + "/" + name + "_FN.jpg", rawImage, FN[0:min(2000, len(FN))])
        Common.outputImage(PATH["RESULT"] + "/" + name + "_TN.jpg", rawImage, TN[0:min(2000, len(TN))])
        Common.outputImage(PATH["RESULT"] + "/" + name + "_TP(0.15).jpg", rawImage, TP_015)
        Common.outputImage(PATH["RESULT"] + "/" + name + "_FP(0.15).jpg", rawImage, FP_015)
        Common.outputImage(PATH["RESULT"] + "/" + name + "_TP(0.20).jpg", rawImage, TP_020)
        Common.outputImage(PATH["RESULT"] + "/" + name + "_FP(0.20).jpg", rawImage, FP_020)
        Common.outputImage(PATH["RESULT"] + "/" + name + "_TP(0.25).jpg", rawImage, TP_025)
        Common.outputImage(PATH["RESULT"] + "/" + name + "_FP(0.25).jpg", rawImage, FP_025)
        Common.outputImage(PATH["RESULT"] + "/" + name + "_TP(0.30).jpg", rawImage, TP_030)
        Common.outputImage(PATH["RESULT"] + "/" + name + "_FP(0.30).jpg", rawImage, FP_030)

    Common.writeCSV(PATH["RESULT"] + "/summary.csv",
                    name=names, carnum=CARNUM,
                    tp=TPnum, fp=FPnum, fn=FNnum, tn=TNnum,
                    tp15=TP_15num, fp15=FP_15num,
                    tp20=TP_20num, fp20=FP_20num,
                    tp25=TP_25num, fp25=FP_25num,
                    tp30=TP_30num, fp30=FP_30num,
                    identificationcarnum=IDENTIFICATION_CARNUM
                    )
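A sketch of how the per-image counts collected above could be aggregated into dataset-level figures. The helper name overallMetrics is hypothetical and it assumes IDENTIFICATION_CARNUM counts the ground-truth vehicles covered by at least one TP proposal, as suggested by getIdentificationBndboxFromProposal:

def overallMetrics(CARNUM, IDENTIFICATION_CARNUM, TPnum, FPnum, FNnum):
    """Illustrative helper: aggregate per-image counts from run() into dataset-level figures."""
    cars = sum(CARNUM)
    found = sum(IDENTIFICATION_CARNUM)
    tp, fp, fn = sum(TPnum), sum(FPnum), sum(FNnum)
    detectionRate = float(found) / cars if cars else 0.0       # vehicles matched by at least one TP proposal (assumed)
    precision = float(tp) / (tp + fp) if (tp + fp) else 0.0    # proposal-level precision
    recall = float(tp) / (tp + fn) if (tp + fn) else 0.0       # proposal-level recall
    return detectionRate, precision, recall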