Code example #1
def distribution(directory, version, model, batch_size, resize):
    print("Calculate mean of distribution")
    # directory = "Model-7"
    createFolder(directory)
    createFolder(directory + "\\" + version)
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    gaussian_distribition(directory,
                          version,
                          model,
                          pair_money_train_loader,
                          pair_money_val_loader,
                          pair_money_test_loader,
                          resize,
                          batch_size,
                          exp_name='model_1')
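
A usage sketch follows (the checkpoint name and hyperparameter values here are illustrative assumptions, not values taken from the project):

# Hypothetical invocation of distribution(); assumes the project's modules
# are importable and that a trained checkpoint exists under this name.
import torch

model = torch.load("modello7_v1.pth")  # hypothetical checkpoint file
distribution("Model-7", "1", model, batch_size=16, resize=100)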
Code example #2
def continue_siamese(directory, namefile, version, path, exp_name, name, model,
                     lr, epochs, momentum, batch_size, resize, margin, logs):

    (siamese_reload, f, array_loss_train, array_loss_valid, array_sample_train,
     array_sample_valid, array_acc_train, array_acc_valid, labels_train,
     prediction_train, labels_val, prediction_val) = train_continue(
         directory, version, path, exp_name, name, model, lr, epochs,
         momentum, batch_size, resize, margin, logs)

    print("Time training\n ", f)
    print("Loss on train", array_loss_train[-1])
    print("Loss on valid\n", array_loss_valid[-1])
    directory = "Model-1-Continue"
    # check whether the directory exists, otherwise create it
    createFolder(directory)
    # plots
    plotLoss(directory, namefile, array_loss_train, array_loss_valid,
             array_sample_train, array_sample_valid)
    plotAccuracy(directory, namefile, array_acc_train, array_acc_valid,
                 array_sample_train, array_sample_valid)
                    
    scoresTrain = calculateScore(labels_train, prediction_train)
    scoresValid = calculateScore(labels_val, prediction_val)

    print("Scores on train data...")
    print("Accuracy on train: %0.4f" % scoresTrain[0])
    print("Precision on train: %0.4f" % scoresTrain[1])
    print("Recall on train: %0.4f" % scoresTrain[2])
    print("mF1 score on train: %0.4f" % scoresTrain[3])

    print("Scores on validation data...")
    print("Accuracy on validation: %0.4f" % scoresValid[0])
    print("Precision on validation: %0.4f" % scoresValid[1])
    print("Recall on validation: %0.4f" % scoresValid[2])
    print("mF1 score on validation: %0.4f" % scoresValid[3])
    
    valueTime = readJson(directory + "/ModelTrained.json", version, "time",
                         "training")

    epoche = readJson(directory + "/ModelTrained.json", version,
                      "hyperparametr", "indexEpoch")

    if valueTime is not None:
        f = float(valueTime) + float(f)

    if epoche is not None:
        epochs = epochs + epoche
                        
    hyperparametr = {"indexEpoch":epochs,"lr":lr, "momentum" : momentum, "numSampleTrain": len(labels_train) }
    contrastiveLoss = {"lossTrain": array_loss_train[-1], "lossValid": array_loss_valid[-1] }
    accuracy = {"accuracyTrain":scoresTrain[0] , "accuracyValid":scoresValid[0] }
    precision = {"precisionTrain":scoresTrain[1]  , "precisionValid":scoresValid[1] }
    recall = {"recallTrain":scoresTrain[2] , "recallValid":scoresValid[2] }
    f1score = {"f1_score_Train":scoresTrain[3] , "f1_score_Valid":scoresValid[3]}
    time = {"training": str(f)}
    
    
    writeJsonModel(directory, name, version, hyperparametr, batch_size,
                   contrastiveLoss, accuracy, f1score, precision, recall, time)
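
continue_siamese relies on a readJson helper to pull previously logged values out of ModelTrained.json; its body is not shown in this listing. A minimal sketch consistent with the calls above (an assumption, not the project's actual implementation):

# Hypothetical sketch of readJson: walk ModelTrained.json as
# {version: {section: {key: value}}} and return None on any miss.
import json

def readJson(path, version, section, key):
    try:
        with open(path, "r") as json_file:
            data = json.load(json_file)
        return data[version][section][key]
    except (OSError, KeyError, TypeError, ValueError):
        return None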
Code example #3
def classificazione(directory, filename, version, exp_name, name, model, lr,
                    epochs, momentum, batch_size, resize):
    print("Classificazione")
    # directory e.g. "Class"
    controlFileCSVBase()
    
    dataSetClass = DatasetClassi(resize)
    dataSetClass.controlNormalize()
    
    train = dataSetClass.dataset_train_norm
    validation = dataSetClass.dataset_valid_norm
    test = dataSetClass.dataset_test_norm
    print("Numeri campioni",len(train))
    createFolder(directory)
    createFolder(directory+"\\"+version)
    writeJsonModelInit1(directory,name,version) 
    
    money_train_loader = DataLoader(train, batch_size=batch_size,
                                    num_workers=0, shuffle=True)
    money_test_loader = DataLoader(test, batch_size=batch_size, num_workers=0)
    money_val_loader = DataLoader(validation, batch_size=batch_size,
                                  num_workers=0)
    print("Number of batches", len(money_train_loader))
    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_class(
        directory, version, model, money_train_loader, money_val_loader,
        resize, batch_size, exp_name, lr=lr, epochs=epochs)
    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)
    
    hyperparametr = {"indexEpoch":epochs-1,"lr":lr, "momentum" : momentum, "batchSize":batch_size }
    contrastiveLoss = {"lossTrain": last_loss_train, "lossValid":last_loss_val}
    accuracy = {"accuracyTrain":last_acc_train , "accuracyValid":last_acc_val }
    time = {"training": f}
    
    
    writeJsonModelClass(directory,name,version, hyperparametr,resize,batch_size, contrastiveLoss, accuracy ,time)
    
    path = exp_name+".pth"
    model_test = torch.load(path)
    
    """
Code example #4
File: DatasetClassi.py Project: rroosa/machineL
    def controlNormalize(self):
        # check whether the directory exists, otherwise create it
        createFolder(self.path)
        #print("Controll")
        if not os.path.exists(self.path + '\\dataSetJson.json'):
            print("1) Checking: mean, dev_std")
            self.run()

        else:  # if the file exists, check whether the mean and dev keys are present

            try:
                with open(self.path + "\\dataSetJson.json", "r") as json_file:
                    data = json.load(json_file)
                print(self.path + "\\dataSetJson.json")
                if not (data.get('normalize') is None):
                    #print("Qui")
                    norm = data['normalize']

                    if norm.get('mean') is not None and norm.get('dev_std') is not None:

                        response = input(
                            "Do you want to re-calculate the mean and standard deviation? y | n : "
                        )
                        if (response == "y"):
                            print("recalculate")
                            self.run()
                        elif (response == "n"):
                            print("bypass this step!!")
                            self.mean = tuple(norm['mean'])
                            print(self.mean)
                            self.dev_std = tuple(norm['dev_std'])
                            print(self.dev_std)
                            self.normalizeDataSet()
                            return
                        else:
                            self.controlNormalize()
                    else:
                        self.run()
                else:
                    self.run()
            except Exception:
                # if parsing failed, recalculate the mean and std
                sys.stderr.write("Error parsing")
                exit(0)
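
For reference, the part of dataSetJson.json that controlNormalize reads could be produced like this (a sketch: the key names follow the lookups above, the numbers are placeholders):

# Sketch of the dataSetJson.json layout expected by controlNormalize.
# Key names match the code above; the values are placeholders.
import json

example = {
    "normalize": {
        "mean": [0.5, 0.5, 0.5],        # placeholder per-channel means
        "dev_std": [0.25, 0.25, 0.25],  # placeholder per-channel std devs
    }
}
with open("Dataset\\dataSetJson.json", "w") as json_file:
    json.dump(example, json_file, indent=4)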
Code example #5
File: DataSetPairCreate.py Project: rroosa/machineL
    def controlMargin(self):
        # check whether the directory exists, otherwise create it
        createFolder(self.path)
        #print("Controll")
        if not os.path.exists(self.path + '\\dataSetJson.json'):
            print("1) Calculate mean margine")
            self.runMargin()

        else:  # if the file exists, check whether the mean and dev keys are present
            res = str(self.resize)
            try:
                with open(self.path + "\\dataSetJson.json", "r") as json_file:
                    data = json.load(json_file)
                print(self.path + "\\dataSetJson.json")
                if not (data.get('margineMean_' + res) is None):
                    #print("Qui")
                    margin = data['margineMean_' + res]

                    response = input(
                        "Do you want to re-calculate margin? y | n : ")
                    if (response == "y"):
                        print("recalculate")
                        self.runMargin()
                    elif (response == "n"):
                        print("bypass this step")
                        self.margin = margin

                    else:
                        self.controlMargin()

                else:
                    self.runMargin()
            except Exception:
                # if parsing failed, recalculate the mean margin
                sys.stderr.write("Error parsing")
                exit(0)
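
controlMargin looks the margin up under a key parameterized by the resize value (e.g. margineMean_100 for resize=100). A sketch of adding that entry to the same file (the numeric value is a placeholder):

# Sketch: store a precomputed mean margin under the resize-specific key
# that controlMargin reads. The value 1.0 is a placeholder.
import json

with open("Dataset\\dataSetJson.json", "r") as json_file:
    data = json.load(json_file)
data["margineMean_100"] = 1.0
with open("Dataset\\dataSetJson.json", "w") as json_file:
    json.dump(data, json_file, indent=4)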
Code example #6
File: project_money.py Project: rroosa/machineL
def main(argv):

    # create the file "dataSetJson.json" if it does not exist
    entry = {"nameDB": "Moneys"}
    controlFolder("Dataset")
    creteFileJson("Dataset\\dataSetJson.json", entry)
    data_create = DataSetCreate()

    #name_id = data_create.name_classes_id()
    #list_all_images = data_create.list_all_images()
    #num_tot_files = data_create.num_total_file()

    parser = argparse.ArgumentParser(description="Dataset Money")

    parser.add_argument('--create',
                        help="datasetBase | datasetLarge | datasetPair")
    parser.add_argument(
        '--info', help="dataset | datasetBase | datasetLarge | datasetPair")
    #parser.add_argument('--training', help="1")
    parser.add_argument('--test',
                        help="Name of model [model5 | model6 ]",
                        type=str)

    parser.add_argument('--train',
                        help="Name of model [model5 | model6 ]",
                        type=str)
    parser.add_argument('--v', help="version", type=int)

    parser.add_argument('--progress',
                        help="Name of model [model5 | model6 ]",
                        type=str)
    parser.add_argument('--file', help="name file .pth", type=str)

    parser.add_argument('--e', help="epochs", type=int)
    parser.add_argument('--margine', help="dim of resize", type=int)

    parser.add_argument('--classification',
                        help="[ train | test | continue | demo ]",
                        type=str)

    parser.add_argument('--classtest', help="classTest")
    parser.add_argument('--demo', help="[ model5 | model6 ]")
    parser.add_argument('--pair', help="insert id pair [0 - 13824]", type=int)
    parser.add_argument('--soglia', help="threshold", type=float)
    parser.add_argument('--margin', help="margin", type=float)

    parser.add_argument('--path', help="path of model '.pth'", type=str)

    parser.add_argument('--distribution',
                        help="distribution of the trained training data")

    parser.add_argument('--pathModel',
                        help="path of the model to initialize")

    parser.add_argument('--margin1', help="margin 1", type=float)

    parser.add_argument('--margin2', help="margin 2", type=float)

    parser.add_argument('--roc', help="roc")
    argomento = parser.parse_args()

    #-------- DISTRIBUTION------

    required_together_distrib = ('distribution', 'v')
    if argomento.distribution is not None:
        # args.model will be None if v is not provided
        if not all([getattr(argomento, x) for x in required_together_distrib]):
            raise RuntimeError("Cannot supply --distribution without --v ")
        else:

            #------  MODEL 6
            if argomento.distribution == "model6":
                print("DISTRIBUTION model ", argomento.distribution)
                #------       MODEL 6 v 2
                if argomento.v == 2:
                    print("version v2")
                    directory = "Model-6"
                    version = "2"
                    resize = 100
                    batch_size = 16
                    createFolder(directory)
                    createFolder(directory + "\\" + version)
                    dataSetPair = DataSetPairCreate(resize)
                    dataSetPair.controlNormalize()
                    pair_train = dataSetPair.pair_money_train

                    pair_money_train_loader = DataLoader(pair_train,
                                                         batch_size=batch_size,
                                                         num_workers=0,
                                                         shuffle=True)
                    path = directory + "\\" + version + "\\modello6_v2_6.pth"
                    gaussian_distribution_train_margine_single(
                        directory, version, pair_money_train_loader, resize,
                        batch_size, path)
                else:
                    exit(0)

#--------------------------- DEMO -------------------------------

    required_together_demo = ('demo', 'v', 'pair')
    if argomento.demo is not None:
        # args.model will be None if v is not provided
        if not all([getattr(argomento, x) for x in required_together_demo]):
            raise RuntimeError("Cannot supply --demo without --v --pair")
        else:

            #------  MODEL 5
            if argomento.demo == "model5":
                print("Demo model ", argomento.demo)

                if argomento.v == 5:
                    print("version v5")
                    print("model5 v5 ResNet siamese classification SGD")
                    directory = "Model-5\\"
                    path = 'modello5_v5.pth'
                    version = "5"
                    idPair = argomento.pair
                    # check whether the given id corresponds to an existing pair
                    resize = 100
                    demo_obj = Demo(directory, version, resize)
                    demo_obj.controlPair(idPair)
                    demo_obj.read_normalize()
                    dizionario = demo_obj.getitem(idPair)

                    siamese_test = torch.load(path)

                    demo_obj.test_demo(dizionario, siamese_test)
                    demo_obj.plottare()

                elif argomento.v == 7:
                    print("version v7")
                    print(
                        "DEMO model5 v7, Marek Net siamese classification SGD")
                    directory = "Model-5\\"
                    version = "7"
                    path = directory + version + "\\" + 'modello5_v7_17.pth'

                    idPair = argomento.pair
                    # check whether the given id corresponds to an existing pair
                    resize = 100
                    demo_obj = Demo(directory, version, resize)
                    demo_obj.controlPair(idPair)
                    demo_obj.read_normalize()
                    dizionario = demo_obj.getitem(idPair)

                    siamese_test = torch.load(path)

                    demo_obj.test_demo(dizionario, siamese_test)
                    demo_obj.plottare()

                else:
                    print("Versione del model5 non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --train model5 --v [ 5 | 7 ]\n"
                    )
                    exit(0)

            # --DEMO ---- MODEL 6
            elif argomento.demo == "model6":
                print("Demo model ", argomento.demo)

                #------DEMO---  MODEL 6 v 2
                if argomento.v == 2:
                    print("version v2")
                    print("model6 v2 ResNet, single margine=2.0, soglia=0.92")
                    directory = "Model-6\\"
                    version = "2"
                    path = directory + version + "\\" + 'modello6_v2_6.pth'

                    idPair = argomento.pair
                    resize = 100
                    demo_obj = Demo(directory, version, resize)
                    demo_obj.controlPair(idPair)
                    demo_obj.read_normalize()
                    dizionario = demo_obj.getitem(idPair)

                    siamese_test = torch.load(path)
                    soglia = 0.92
                    dist = demo_obj.test_demo_single_margine(
                        dizionario, siamese_test, soglia)
                    demo_obj.plottare(dist)

                elif argomento.v == 4:
                    print("version v2")
                    print("model6 v4 ResNet, double margine=0.7 e 1.3")
                    directory = "Model-6\\"
                    version = "4"
                    path = directory + version + "\\" + 'modello6_v4_51.pth'

                    idPair = argomento.pair
                    resize = 100
                    demo_obj = Demo(directory, version, resize)
                    demo_obj.controlPair(idPair)
                    demo_obj.read_normalize()
                    dizionario = demo_obj.getitem(idPair)

                    siamese_test = torch.load(path)
                    margin1 = 0.7
                    margin2 = 1.2
                    dist = demo_obj.test_demo_double_margine(
                        dizionario, siamese_test, margin1, margin2)
                    demo_obj.plottare(dist)

                else:
                    print("Versione del model6 non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --train model6 --v [ 2 | 4 ]\n"
                    )
                    exit(0)

            else:
                print("Modello non riconosciuto")
                sys.stderr.write(
                    "Model not acknowledged, try --train [ model5 | model6 ]\n"
                )
                exit(0)

# -------------------- TRAIN ------ DATA CLASSIFICATION ---------------------

    elif argomento.classification == "train":
        required_together = ('classification', 'v', 'e')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification train without --v --e")
        else:
            #-------MODEL MNET
            if argomento.v == 2:
                epochs = 20
                lr = 0.0001
                momentum = 0.9
                batch_size = 16
                resize = 100
                if argomento.e is not None:
                    epochs = argomento.e

                directory = "Classe"
                filename = "//class"
                version = "2"
                exp_name = 'class_2'
                name = 'ModelM'
                model = ModelM()

                classificazione(directory, filename, version, exp_name, name,
                                model, lr, epochs, momentum, batch_size,
                                resize)

            #--------MODEL RESNET
            elif argomento.v == 1:
                print("Resnet")
                lr = 0.0001
                momentum = 0.9
                batch_size = 16
                resize = 256
                if argomento.e is not None:
                    epochs = argomento.e
                directory = "Classe"
                filename = "//class"
                version = "1"
                exp_name = 'class_1'
                name = 'ResNet'

                model = resnet34(pretrained=True)
                resnet_copy = deepcopy(model)

                ### adattamento
                num_class = 5
                resnet_copy.fc = nn.Linear(512, num_class)
                resnet_copy.num_classes = num_class
                print(resnet_copy)

                classificazione(directory, filename, version, exp_name, name,
                                resnet_copy, lr, epochs, momentum, batch_size,
                                resize)

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification train --v [ 1 | 2 ]\n"
                )
                exit(0)

#------------------- TEST ------ DATA CLASSIFICATION

#---- test on the base dataset
    elif argomento.classification == "test":
        required_together = ('classification', 'v')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification test without --v")
        else:
            if argomento.v == 2:
                print("MNet classification version 2")
                directory = "Classe"

                version = "2"
                batch_size = 16
                resize = 100
                name = 'ModelM'

                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//2//class_2_44.pth'

                testing_classificazione(directory, path_dict, version, resize,
                                        batch_size)

            elif argomento.v == 1:
                print("Resnet classification version 1")

                directory = "Classe"

                version = "1"
                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//1//class_1_19.pth'
                name = 'ResNet'
                batch_size = 4
                resize = 256
                testing_classificazione(directory, path_dict, version, resize,
                                        batch_size)

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification test --v [ 1 | 2 ]\n"
                )
                exit(0)

#--------------- TEST on datasetPair with manual classification

    elif argomento.classification == "testPair":
        required_together = ('classification', 'v')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification testPair without --v")
        else:
            if argomento.v == 2:

                directory = "Classe"

                version = "2"
                batch_size = 16
                resize = 100
                name = 'ModelM'

                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//2//class_2_44.pth'

                testing_classificazionePair(directory, path_dict, version,
                                            resize, batch_size)

            elif argomento.v == 1:
                print("Resnet classification version 1")

                directory = "Classe"

                version = "1"
                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//1//class_1_19.pth'
                name = 'ResNet'
                batch_size = 4
                resize = 256
                testing_classificazionePair(directory, path_dict, version,
                                            resize, batch_size)

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification testPair --v [ 1 | 2 ]\n"
                )
                exit(0)

#------------------- CONTINUE ------ DATA CLASSIFICATION

    elif argomento.classification == "continue":
        required_together = ('classification', 'v', 'e')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification continue without --v --e")
        else:

            if argomento.v == 2:
                print("MNet classification continue version 2")
                directory = "Classe"
                exp_name = 'class_2'
                version = "2"
                lr = 0.0001
                momentum = 0.9
                batch_size = 16
                resize = 100

                name = 'ModelM'

                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//2//class_2_19.pth'

                model = torch.load(path_dict)
                epoche_avanza = argomento.e
                continue_classificazione(directory, model, version, exp_name,
                                         name, lr, momentum, resize,
                                         batch_size, epoche_avanza)

            elif argomento.v == 1:
                print("Resnet classification continue version 1")

                directory = "Classe"
                version = "1"
                batch_size = 4
                resize = 256
                lr = 0.0001
                momentum = 0.9
                exp_name = 'class_1'
                name = 'ResNet'
                if argomento.pathModel is not None:
                    path_dict = argomento.pathModel
                else:
                    path_dict = 'Classe//1//class_1_19.pth'

                model = torch.load(path_dict)
                epoche_avanza = argomento.e

                continue_classificazione(directory, model, version, exp_name,
                                         name, lr, momentum, resize,
                                         batch_size, epoche_avanza)

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification continue --v [ 1 | 2 ]\n"
                )
                exit(0)

# --------------- DEMO ------------------ MANUAL CLASSIFICATION
    elif argomento.classification == "demo":

        required_together = ('classification', 'v', 'pair')
        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError(
                "Cannot supply --classification demo without --v --pair")
        else:

            #----- MODEL RESNET
            if argomento.v == 1:
                print("Classification Manual ResNet")
                if argomento.pathModel is not None:
                    path = argomento.pathModel
                else:
                    path = 'Classe\\1\\class_1_19.pth'
                directory = "Classe\\"
                version = "1"
                idPair = argomento.pair

                resize = 256
                demo_obj = Demo(directory, version, resize)
                demo_obj.controlPair(idPair)
                demo_obj.read_normalize()
                dizionario = demo_obj.getitem(idPair)

                class_test = torch.load(path)

                demo_obj.test_demo_order_manual(dizionario, class_test)
                demo_obj.plottare()

            #----- MODEL MNET
            elif argomento.v == 2:
                directory = "Classe\\"
                if argomento.pathModel is not None:
                    path = argomento.pathModel
                else:
                    path = 'Classe\\2\\class_2_44.pth'

                version = "2"
                idPair = argomento.pair
                # check whether the given id corresponds to an existing pair
                resize = 100
                demo_obj = Demo(directory, version, resize)
                demo_obj.controlPair(idPair)
                demo_obj.read_normalize()
                dizionario = demo_obj.getitem(idPair)

                class_test = torch.load(path)

                demo_obj.test_demo_order_manual(dizionario, class_test)
                demo_obj.plottare()
                return

            else:
                print("Versione non riconosciuta")
                sys.stderr.write(
                    "Version not acknowledged, try --classification demo --v [ 1 | 2 ]\n"
                )
                exit(0)

#------------ DATASET CREATION --create

    if argomento.create == "datasetBase":  # create the base dataset
        data_create.create_Dataset_Base()

    elif argomento.create == "datasetLarge":  # creazione dataset di Base e datasetLarge
        data_create.create_Dataset_Large()

    elif argomento.create == "datasetPair":
        # check whether the split dataset is present
        controlFileCSV()
        dataSetPair = DataSetPairCreate()
        dataSetPair.controlNormalize()

#-------------   DATASET INFORMATION  --info

    data_set_info = argomento.info
    if (data_set_info == "dataset"):
        # DataSetCreate object
        print("Dataset of base\n")
        # read from the file Dataset\dataSetJson.json
        info = readFileDataset("Dataset\\dataSetJson.json", "dataset")
        #info = data_create.info_classes()
        for i in info:
            print(i)
        num = lengthDataset("Dataset\\dataSetJson.json", "dataset",
                            "num_images")
        print("Length Dataset of Base = ", num)
        print("\n")

    elif (data_set_info == "datasetBase"):
        print("Dataset Base\n")
        #info = data_create.info_datasetLarge()
        info = readFileDataset("Dataset\\dataSetJson.json", "datasetBase")
        for i in info:
            print(i)
        num = lengthDataset("Dataset\\dataSetJson.json", "datasetBase",
                            "num_sample")
        print("Length DatasetBase = ", num)

    elif (data_set_info == "datasetLarge"):
        print("Dataset Large\n")
        #info = data_create.info_datasetLarge()
        info = readFileDataset("Dataset\\dataSetJson.json", "datasetLarge")
        for i in info:
            print(i)
        num = lengthDataset("Dataset\\dataSetJson.json", "datasetLarge",
                            "num_sample")
        print("Length DatasetLarge = ", num)

    elif (data_set_info == "datasetPair"):

        print("DatasetPair\n")
        info = readFileDataset("Dataset\\dataSetJson.json", "dataSetPair")
        for i in info:
            print(i)

#-------------- TRAINING PHASE OF MODELS 5 and 6 --train

    required_together = ('train', 'v', 'e')
    if argomento.train is not None:

        if not all([getattr(argomento, x) for x in required_together]):
            raise RuntimeError("Cannot supply --train without --v --e")
        else:

            #------  MODEL 5
            if argomento.train == "model5":
                if argomento.v == 7:
                    # siamese with transfer learning: MNet previously used for
                    # classification, with the classification layer replaced for 2 classes
                    # new: initialized from class_2, 44 epochs
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100
                    epochs = 20
                    if argomento.e is not None:
                        epochs = argomento.e
                    batch_size = 4
                    directory = "Model-5"
                    filename = "//5_v7"
                    version = "7"
                    exp_name = 'modello5_v7'
                    name = 'MNet'

                    # initialize the model with MNet parameters
                    path = "class_2.pth"
                    model = torch.load(path)
                    model_copy = deepcopy(model)
                    fully_connect = model_copy.fc
                    fully = list(fully_connect)
                    fully.pop()
                    model_copy.fc = nn.Sequential(*fully)
                    # adaptation
                    model_copy.fc2 = nn.Sequential(nn.Linear(512, 2))
                    print(model_copy)

                    train_model_class_v1(directory, filename, version,
                                         exp_name, name, model_copy, lr,
                                         epochs, momentum, batch_size, resize)

                elif argomento.v == 5:
                    # siamese with transfer learning: ResNet previously used for
                    # classification, with the last layer removed,
                    # a layer added to take the concatenated outputs as input,
                    # and a 2-class classification layer added;
                    # the loss function is CrossEntropy for 0/1 classification
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100
                    epochs = 20
                    if argomento.e is not None:
                        epochs = argomento.e
                    batch_size = 4
                    decay = 0.0004
                    directory = "Model-5"
                    filename = "//5_v5"
                    version = "5"
                    exp_name = 'modello5_v5'
                    name = 'ResNet_Class'

                    # initialization
                    model = torch.load("class_1.pth")
                    model_copy = deepcopy(model)

                    ### adaptation
                    num_class = 256
                    model_copy.fc = nn.Linear(512, num_class)
                    model_copy.num_classes = num_class
                    print(model_copy)

                    model_copy.fc2 = nn.Sequential(nn.Linear(512, 2))
                    print(model_copy)

                    train_model_class_v1(directory,
                                         filename,
                                         version,
                                         exp_name,
                                         name,
                                         model_copy,
                                         lr,
                                         epochs,
                                         momentum,
                                         batch_size,
                                         resize,
                                         decay=decay,
                                         modeLoss=None,
                                         dizionario_array=None)

                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --train model5 --v [ 5 | 7 ]\n"
                    )
                    exit(0)

                #----- train MODEL6 siamese
            elif argomento.train == "model6":
                #----- ResNet - single margin
                if argomento.v == 2:
                    # siamese with transfer learning: ResNet previously used for
                    # classification, with the 5-class classification layer removed
                    # and a 256-unit layer inserted;
                    # the loss function is the contrastive loss with a single margin

                    decay = 0.0004
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100
                    epochs = 20
                    if argomento.e is not None:
                        epochs = argomento.e
                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v2"
                    version = "2"
                    exp_name = 'modello6_v2'
                    name = 'RestNet_Margine'

                    # used for 5-class classification, fine-tuned ResNet34
                    model = torch.load("class_1.pth")
                    model_copy = deepcopy(model)

                    ### adaptation
                    num_class = 256
                    model_copy.fc = nn.Linear(512, num_class)
                    model_copy.num_classes = num_class
                    print(model_copy)

                    train_model_margine(directory,
                                        filename,
                                        version,
                                        exp_name,
                                        name,
                                        model_copy,
                                        lr,
                                        epochs,
                                        momentum,
                                        batch_size,
                                        resize,
                                        decay=decay,
                                        margin=2.0,
                                        soglia=1.0,
                                        modeLoss="single")

                elif argomento.v == 4:
                    # siamese with transfer learning: ResNet previously used for
                    # classification, with the 5-class classification layer removed
                    # and a 256-unit layer inserted;
                    # the loss function is the contrastive loss with a double margin

                    decay = 0.004
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100
                    epochs = 20
                    if argomento.e is not None:
                        epochs = argomento.e
                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v4"
                    version = "4"
                    exp_name = 'modello6_v4'
                    name = 'RestNet_Margine_Double'

                    model = torch.load(
                        "class_1.pth"
                    )  # used for classification, fine-tuned ResNet
                    model_copy = deepcopy(model)
                    # used to remove the last layer
                    ### adaptation
                    num_class = 256
                    model_copy.fc = nn.Linear(512, num_class)
                    model_copy.num_classes = num_class
                    print(model_copy)
                    margin1 = 0.7
                    margin2 = 1.2
                    if argomento.margin1 is not None:
                        margin1 = argomento.margin1

                    if argomento.margin2 is not None:
                        margin2 = argomento.margin2

                    train_model_margine_double(directory,
                                               filename,
                                               version,
                                               exp_name,
                                               name,
                                               model_copy,
                                               lr,
                                               epochs,
                                               momentum,
                                               batch_size,
                                               resize,
                                               decay=decay,
                                               margin1=margin1,
                                               margin2=margin2,
                                               modeLoss="double")

                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --train model6 --v [ 2 | 4 ]\n"
                    )
                    exit(0)

            else:
                print("Modello non riconosciuto ")
                sys.stderr.write(
                    "Model not acknowledged, try --train [model5 | model6 ]\n")
                exit(0)

#-------------- TESTING PHASE --test
    required_together_test = ('test', 'v')

    if argomento.test is not None:
        if not all([getattr(argomento, x) for x in required_together_test]):
            raise RuntimeError("Cannot supply --test without --v")
        else:
            #------ test MODEL 5
            if argomento.test == "model5":

                # ----------model 5 v 5 ---- ResNet
                if argomento.v == 5:
                    print("version", argomento.v)

                    print("model 5 v5 ResNet classi siamese con lr =0.0001 ")
                    directory = "Model-5\\"
                    path = 'modello5_v5.pth'
                    version = "5"

                    batch_size = 16
                    resize = 100

                    test_model_class(directory,
                                     path,
                                     version,
                                     resize,
                                     batch_size,
                                     margine=None)

                elif argomento.v == 7:
                    # ----------model 5 v 7 ---- MNet
                    print("version", argomento.v)

                    print("model 5 v7 MNet classi siamese con lr =0.0001")
                    directory = "Model-5\\"

                    path = 'Model-5\\7\\modello5_v7_17.pth'
                    version = "7"

                    batch_size = 16
                    resize = 100

                    test_model_class(directory,
                                     path,
                                     version,
                                     resize,
                                     batch_size,
                                     margine=None)

                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --test model5 --v [ 5 | 7 ]\n"
                    )
                    exit(0)

                #----------test  MODEL 6
            elif argomento.test == "model6":

                #------ model test 6 v 2
                if argomento.v == 2:
                    print("version", argomento.v)
                    print(
                        "model6 v2 Test ResNet siamese single margin 2.0, threshold 0.92"
                    )
                    directory = "Model-6\\"
                    path = directory + "2\\" + 'modello6_v2_6.pth'
                    version = "2"
                    soglia = 0.92
                    if argomento.soglia is not None:
                        soglia = argomento.soglia
                    batch_size = 16
                    resize = 100
                    print("Soglia", soglia)
                    test_model_margine(directory,
                                       path,
                                       version,
                                       resize,
                                       batch_size,
                                       margine=soglia)

                    #-------- model test 6 v 4
                elif argomento.v == 4:
                    print("version", argomento.v)
                    print(
                        "model6 v4 Test ResNet siamese double margin 0.7 and 1.2, 52 epochs"
                    )
                    directory = "Model-6\\"
                    path = directory + "4\\" + 'modello6_v4_51.pth'
                    version = "4"
                    margin1 = 0.7
                    margin2 = 1.2
                    batch_size = 16
                    resize = 100

                    test_model_margine_double(directory, path, version, resize,
                                              batch_size, margin1, margin2)
                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --test model6 --v [ 2 | 4 ]\n"
                    )
                    exit(0)

            else:
                print("Modello non riconosciuto")
                sys.stderr.write(
                    "Model not acknowledged, try --test [model5 | model6 ]\n")
                exit(0)

# ---------------------PERFORMANCE

    if argomento.roc is not None:
        print(argomento.roc)
        print(argomento.v)

        #  PERFORMANCE MODEL 6 V 2
        if argomento.roc == "model6":
            # model test 6 v 2
            if argomento.v == 2:
                print("version", argomento.v)
                print(
                    "model6 v2 Test ResNet siamese single margin 2.0, threshold 1.0")
                directory = "Model-6"
                version = "2"
                path = directory + "\\" + version + '\\modello6_v2_6.pth'
                version = "2"

                batch_size = 16
                resize = 100

                test_model_performance(directory, path, version, resize,
                                       batch_size)


#-------------- CONTINUE PHASE --progress
    required_together_continue = ('progress', 'v', 'e')
    # args.model will be None if v is not provided
    if argomento.progress is not None:
        if not all([getattr(argomento, x)
                    for x in required_together_continue]):
            raise RuntimeError("Cannot supply --progress without --v --e")
        else:

            #----- MODEL 6
            if argomento.progress == "model6":
                print("model", argomento.progress)

                # model continue 6 v 2
                if argomento.v == 2:
                    # siamese with transfer learning: ResNet previously used for
                    # classification, with the 5-class classification layer removed
                    # and a 256-unit layer inserted;
                    # the loss function is the contrastive loss with a single margin

                    decay = 0.0004
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100

                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v2"
                    version = "2"
                    exp_name = 'modello6_v2'
                    name = 'RestNet_Margine_Single'
                    if argomento.pathModel is not None:
                        path = argomento.pathModel
                    else:
                        path = 'Model-6//2//modello6_v2_13.pth'
                    model = torch.load(path)

                    epoche_avanza = argomento.e
                    continue_model_margine_single(directory,
                                                  filename,
                                                  version,
                                                  exp_name,
                                                  name,
                                                  model,
                                                  lr,
                                                  epoche_avanza,
                                                  momentum,
                                                  batch_size,
                                                  resize,
                                                  decay=decay,
                                                  margin1=2.0,
                                                  soglia=0.92,
                                                  modeLoss="single")

                # model continue 6 v 4
                elif argomento.v == 4:
                    # siamese with transfer learning: ResNet previously used for
                    # classification, with the 5-class classification layer removed
                    # and a 256-unit layer inserted;
                    # the loss function is the contrastive loss with a double margin

                    decay = 0.004
                    lr = 0.0001
                    momentum = 0.9
                    resize = 100

                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v4"
                    version = "4"
                    exp_name = 'modello6_v4'
                    name = 'RestNet_Margine_Double'
                    if argomento.pathModel is not None:
                        path = argomento.pathModel
                    else:
                        path = 'Model-6//4//modello6_v4_56.pth'
                    model = torch.load(path)

                    margin1 = 0.7
                    margin2 = 1.2
                    if argomento.margin1 is not None:
                        margin1 = argomento.margin1

                    if argomento.margin2 is not None:
                        margin2 = argomento.margin2
                    epoche_avanza = argomento.e
                    continue_model_margine_double(directory,
                                                  filename,
                                                  version,
                                                  exp_name,
                                                  name,
                                                  model,
                                                  lr,
                                                  epoche_avanza,
                                                  momentum,
                                                  batch_size,
                                                  resize,
                                                  decay=decay,
                                                  margin1=margin1,
                                                  margin2=margin2,
                                                  modeLoss="double")

                # model continue 6 v 6
                elif argomento.v == 6:

                    decay = 0.02
                    lr = 0.001
                    momentum = 0.9
                    resize = 100

                    batch_size = 4
                    directory = "Model-6"
                    filename = "//6_v6"
                    version = "6"
                    exp_name = 'modello6_v6'
                    name = 'RestNet_Margine_Double'
                    if argomento.pathModel is not None:
                        path = argomento.pathModel
                    else:
                        path = 'Model-6//4//modello6_v4_51.pth'
                    model = torch.load(path)

                    margin1 = 0.7
                    margin2 = 1.2
                    if argomento.margin1 is not None:
                        margin1 = argomento.margin1

                    if argomento.margin2 is not None:
                        margin2 = argomento.margin2
                    epoche_avanza = argomento.e
                    continue_model_margine_double(directory,
                                                  filename,
                                                  version,
                                                  exp_name,
                                                  name,
                                                  model,
                                                  lr,
                                                  epoche_avanza,
                                                  momentum,
                                                  batch_size,
                                                  resize,
                                                  decay=decay,
                                                  margin1=margin1,
                                                  margin2=margin2,
                                                  modeLoss="double")
                else:
                    print("Versione non riconosciuta")
                    sys.stderr.write(
                        "Version not acknowledged, try --progress model6 --v [ 2 | 4 | 6 ]\n"
                    )
                    exit(0)
            else:
                print("Modello non riconosciuto ")
                sys.stderr.write(
                    "Model not acknowledged, try --progress [ model6 ]\n")
                exit(0)
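
Taken together, the flags above imply command lines like the following (hypothetical examples assembled from the parser definitions; the project may document different ones):

# Hypothetical invocations, derived from the argparse flags above:
#   python project_money.py --create datasetPair
#   python project_money.py --info datasetLarge
#   python project_money.py --classification train --v 1 --e 20
#   python project_money.py --train model6 --v 2 --e 20
#   python project_money.py --test model6 --v 2 --soglia 0.92
#   python project_money.py --progress model6 --v 4 --e 10 --margin1 0.7 --margin2 1.2
#   python project_money.py --demo model6 --v 2 --pair 42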
Code example #7
def train_model_margine_dynamik(directory, filename, version, exp_name, name,
                                model, lr, epochs, momentum, batch_size,
                                resize):
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    # training
    # returns: the model, training time, loss on train, loss on validation
    createFolder(directory + "\\" + version)
    writeJsonModelInit1(directory, name, version)

    print("Training...")

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_margine_dynamik(
        directory,
        version,
        model,
        pair_money_train_loader,
        pair_money_val_loader,
        resize,
        batch_size,
        exp_name,
        lr=lr,
        epochs=epochs)

    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)

    hyperparametr = {
        "indexEpoch": epochs - 1,
        "lr": lr,
        "momentum": momentum,
        "numSampleTrain": len(pair_train)
    }
    contrastiveLoss = {
        "lossTrain": last_loss_train,
        "lossValid": last_loss_val
    }
    accuracy = {"accuracyTrain": last_acc_train, "accuracyValid": last_acc_val}
    time = {"training": f}

    writeJsonModelClass(directory, name, version, hyperparametr, resize,
                        batch_size, contrastiveLoss, accuracy, time)

    namep = exp_name + ".pth"
    siamese_model = torch.load(namep)

    print("Testing on Validation set")

    timeVal, pair_prediction_val, pair_label_val = test_margine_dynamik(
        siamese_model, pair_money_val_loader)

    numSimilPredette = np.sum(pair_prediction_val == 0)
    print("Num similar predicted", numSimilPredette)
    numDissimilPredette = np.sum(pair_prediction_val == 1)
    print("Num dissimilar predicted", numDissimilPredette)
    numSimilReali = np.sum(pair_label_val == 0)
    print("Num similar actual", numSimilReali)
    numDissimilReali = np.sum(pair_label_val == 1)
    print("Num dissimilar actual", numDissimilReali)

    # calculate accuracy
    print(pair_prediction_val[0:10])
    print(pair_label_val[0:10])
    accuracyVal = accuracy_score(pair_label_val, pair_prediction_val)
    print("Accuracy on test: %0.4f" % accuracyVal)
    # calculate precision
    precisionVal = precision_score(pair_label_val, pair_prediction_val)
    print("Precision on test: %0.4f" % precisionVal)
    # calculate recall
    recallVal = recall_score(pair_label_val, pair_prediction_val)
    print("Recall on test: %0.4f" % recallVal)
    # calculate F1 score
    if recallVal != 0.0 and precisionVal != 0.0:

        scores_testing_val = f1_score(pair_label_val,
                                      pair_prediction_val,
                                      average=None)
        scores_testing_val = scores_testing_val.mean()
        print("mF1 score di testing: %0.4f" % scores_testing_val)

    else:
        scores_testing_val = 0.000
        print("mscoref1", scores_testing_val)

    key = ["accuracy", "precision", "recall", "mf1_score", "time"]
    entry = [
        "accuracyVal", "precisionVal", "recallVal", "f1_score_Val", "testVal"
    ]
    value = [accuracyVal, precisionVal, recallVal, scores_testing_val, timeVal]
    addValueJsonModel(directory + "modelTrained.json", version, key[0],
                      entry[0], value[0])
    addValueJsonModel(directory + "modelTrained.json", version, key[1],
                      entry[1], value[1])
    addValueJsonModel(directory + "modelTrained.json", version, key[2],
                      entry[2], value[2])
    addValueJsonModel(directory + "modelTrained.json", version, key[3],
                      entry[3], value[3])
    addValueJsonModel(directory + "modelTrained.json", version, key[4],
                      entry[4], value[4])
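
addValueJsonModel is another helper whose body is not shown. A minimal sketch consistent with how it is called above (path, version, section key, entry name, value), offered as an assumption rather than the project's real code:

# Hypothetical sketch of addValueJsonModel: insert value under
# data[version][key][entry] and rewrite the JSON file in place.
import json
import os

def addValueJsonModel(path, version, key, entry, value):
    data = {}
    if os.path.exists(path):
        with open(path, "r") as json_file:
            data = json.load(json_file)
    data.setdefault(version, {}).setdefault(key, {})[entry] = value
    with open(path, "w") as json_file:
        json.dump(data, json_file, indent=4)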
Code example #8
def train_model_margine_double(directory,
                               filename,
                               version,
                               exp_name,
                               name,
                               model,
                               lr,
                               epochs,
                               momentum,
                               batch_size,
                               resize,
                               decay=None,
                               margin1=None,
                               margin2=None,
                               modeLoss=None):
    # directory, e.g. "Model-6"
    createFolder(directory)
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    siamese_money = model  # the model

    createFolder(directory + "\\" + version)
    createFolder(directory + "\\" + version + "\\" + "Metod2")

    writeJsonModelInit1(directory, name, version)

    print("Training...")

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_siamese_margin_double(
        directory,
        version,
        siamese_money,
        pair_money_train_loader,
        pair_money_val_loader,
        resize,
        batch_size,
        exp_name=exp_name,
        lr=lr,
        epochs=epochs,
        momentum=momentum,
        margin1=margin1,
        margin2=margin2,
        logdir='logs',
        decay=decay,
        modeLoss=modeLoss)

    print("Time computing", f)
    print("last_loss_train", last_loss_train)
    print("last_loss_val", last_loss_val)
    print("last_acc_train", last_acc_train)
    print("last_acc_val", last_acc_val)
    if not decay is None:

        hyperparametr = {
            "indexEpoch": epochs - 1,
            "lr": lr,
            "decay": decay,
            "momentum": momentum,
            "numSampleTrain": len(pair_train)
        }
    else:
        hyperparametr = {
            "indexEpoch": epochs - 1,
            "lr": lr,
            "momentum": momentum,
            "numSampleTrain": len(pair_train)
        }

    contrastiveLoss = {
        "lossTrain": last_loss_train,
        "lossValid": last_loss_val
    }
    accuracy = {"accuracyTrain": last_acc_train, "accuracyValid": last_acc_val}
    time = {"training": f}

    writeJsonModelClass(directory, name, version, hyperparametr, resize,
                        batch_size, contrastiveLoss, accuracy, time)
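train_siamese_margin_double receives two margins, but its body is not shown in this listing. A plausible double-margin contrastive loss consistent with those arguments is sketched below: similar pairs (label 0) are penalized outside margin1, dissimilar pairs (label 1) inside margin2. Treat it as an assumption, not the project's actual loss.

import torch
import torch.nn.functional as F

def double_margin_contrastive(out1, out2, label, margin1=0.8, margin2=2.0):
    # Euclidean distance between the two embeddings of each pair
    d = F.pairwise_distance(out1, out2)
    # similar pairs (label 0): penalize only the part of the distance beyond margin1
    loss_sim = (1 - label) * torch.clamp(d - margin1, min=0.0) ** 2
    # dissimilar pairs (label 1): penalize only distances that fall below margin2
    loss_dis = label * torch.clamp(margin2 - d, min=0.0) ** 2
    return 0.5 * (loss_sim + loss_dis).mean()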
コード例 #9
0
def train_model(directory,
                filename,
                version,
                exp_name,
                name,
                model,
                lr,
                epochs,
                momentum,
                batch_size,
                resize,
                modeLoss=None):

    warnings.filterwarnings('always')

    controlFileCSV()
    controlFileCSVPair()

    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)

    siamese_money = model  # the model
    #training
    #returns: model, training time, loss on train, loss on val
    print("Training...")

    siamese_money, timeTraining, array_loss_train, array_loss_val, array_sample_train, array_sample_valid, array_acc_train, array_acc_valid, labels_train, prediction_train, labels_val, prediction_val = train_siamese(
        siamese_money,
        pair_money_train_loader,
        pair_money_val_loader,
        exp_name,
        lr=lr,
        epochs=epochs,
        modeLoss=modeLoss)

    print("time Training\n ", timeTraining)
    print("Loss last on train", array_loss_train[-1])
    print("Loss last on valid\n", array_loss_val[-1])

    print("Array sample last on train", array_sample_train[-1])
    print("Array sample on last  valid\n", array_sample_valid[-1])

    print("lunghezza array accuracy TRAIN", len(array_acc_train))
    print("Lunghezza array SAMPLE train", len(array_sample_train))

    print("lunghezza array accuracy VALID", len(array_acc_valid))
    print("Lunghezza array SAMPLE VALID", len(array_sample_valid))

    # create the directory if it does not already exist
    createFolder(directory)
    #plot
    plotLoss(directory, filename, array_loss_train, array_loss_val,
             array_sample_train, array_sample_valid)
    plotAccuracy(directory, filename, array_acc_train, array_acc_valid,
                 array_sample_train, array_sample_valid)

    #------------------------ EVALUATE ON TRAINING DATA
    #device = "cuda" if torch.cuda.is_available() else "cpu"
    #siamese_money.to(device)
    print("Score on dataTrain...")
    #pair_predictionTrain, pair_labelTrain, timeTrain = test_siamese(siamese_money, pair_money_train_loader, margin=2)
    #calculate Accuracy
    accuracyTrain = accuracy_score(labels_train, prediction_train)
    print("Train accuracy (last batch): %0.4f" % accuracyTrain)
    #calculate Precision
    precisionTrain = precision_score(labels_train, prediction_train)
    print("Train precision (last batch): %0.4f" % precisionTrain)
    #calculate Recall
    recallTrain = recall_score(labels_train, prediction_train)
    print("Train recall (last batch): %0.4f" % recallTrain)

    if recallTrain != 0.0 and precisionTrain != 0.0:
        #calculate F1 score
        scores_training = f1_score(labels_train,
                                   prediction_train,
                                   average=None)
        scores_training = scores_training.mean()
        print("Train F1 score (last batch): %0.4f" % scores_training)
    else:
        scores_training = 0.000
        print("Train F1 score (last batch): %0.4f" % scores_training)

    #------------------------ EVALUATE ON VALIDATION DATA
    print("Score on dataValid...")
    #pair_predictionValid, pair_labelValid, timeValid = test_siamese(siamese_money, pair_money_val_loader, margin=2)
    #calculate Accuracy
    accuracyValid = accuracy_score(labels_val, prediction_val)
    print("Validation accuracy: %0.4f" % accuracyValid)
    #calculate Precision
    precisionValid = precision_score(labels_val, prediction_val)
    print("Validation precision: %0.4f" % precisionValid)
    #calculate Recall
    recallValid = recall_score(labels_val, prediction_val)
    print("Validation recall: %0.4f" % recallValid)
    #calculate F1 score
    if recallValid != 0.0 and precisionValid != 0.0:
        scores_valid = f1_score(labels_val, prediction_val, average=None)
        scores_valid = scores_valid.mean()
        print("Validation mean F1 score: %0.4f" % scores_valid)
    else:
        scores_valid = 0.00
        print("Validation mean F1 score: %0.4f" % scores_valid)
    """ QUESTO VA FATTO IN FASE DI TESTING UTILIZZANDO I DATI DEL TEST E IL COMANDO RELOAD 
    #------------------------TESTARE SU DATI DEL TEST
    print("Testing on dataTest....")
    pair_prediction, pair_label, timeTest = test_siamese(siamese_money, pair_money_test_loader, margin=2 )
        #calculate Accuracy
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Accuarcy di test: %0.4f"% accuracyTest)
        #calculate Precision
    precisionTest = precision_score(pair_label, pair_prediction)
    print("Precision di test: %0.4f"% precisionTest)
        #calculate Recall
    recallTest = recall_score(pair_label, pair_prediction)
    print("Recall di test: %0.4f"% recallTest)
        #calculate F1 score
    scores_testing = f1_score(pair_label,pair_prediction, average=None)
    print("F1 score di testing: %0.4f"% scores_testing)
    """

    #-------------------------

    hyperparametr = {
        "indexEpoch": epochs - 1,
        "lr": lr,
        "momentum": momentum,
        "numSampleTrain": len(pair_train)
    }
    contrastiveLoss = {
        "lossTrain": array_loss_train[-1],
        "lossValid": array_loss_val[-1]
    }
    accuracy = {"accuracyTrain": accuracyTrain, "accuracyValid": accuracyValid}
    precision = {
        "precisionTrain": precisionTrain,
        "precisionValid": precisionValid
    }
    recall = {"recallTrain": recallTrain, "recallValid": recallValid}
    f1score = {
        "f1_score_Train": scores_training,
        "f1_score_Valid": scores_valid
    }
    time = {"training": timeTraining}

    writeJsonModel(directory, name, version, hyperparametr, batch_size,
                   contrastiveLoss, accuracy, f1score, precision, recall, time)
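plotLoss and plotAccuracy are project helpers whose code is not included here. A minimal matplotlib sketch that matches how plotLoss is called above (loss curves indexed by the cumulative sample counters) might be:

import matplotlib.pyplot as plt

def plotLoss(directory, filename, loss_train, loss_val, samples_train, samples_val):
    # hypothetical re-implementation: one PNG with the train/valid loss curves
    plt.figure()
    plt.plot(samples_train, loss_train, label="train")
    plt.plot(samples_val, loss_val, label="valid")
    plt.xlabel("samples seen")
    plt.ylabel("contrastive loss")
    plt.legend()
    plt.savefig(directory + "\\" + filename + "_loss.png")
    plt.close()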
コード例 #10
0
def continue_classificazione(directory, model, version, exp_name, name, lr,
                             momentum, resize, batch_size, epoche_avanza):
    createFolder(directory)
    createFolder(directory + "\\" + version)
    controlFileCSVBase()

    dataSetClass = DatasetClassi(resize)
    dataSetClass.controlNormalize()

    train = dataSetClass.dataset_train_norm
    validation = dataSetClass.dataset_valid_norm
    test = dataSetClass.dataset_test_norm

    money_train_loader = DataLoader(train,
                                    batch_size=batch_size,
                                    num_workers=0,
                                    shuffle=True)
    money_test_loader = DataLoader(test, batch_size=batch_size, num_workers=0)
    money_val_loader = DataLoader(validation,
                                  batch_size=batch_size,
                                  num_workers=0)

    epoche_fatte = 0
    tempoTrain = 0.0  # default so the resume still works if "time" is missing from the JSON
    array_loss_train = []
    array_loss_valid = []
    array_accuracy_train = []
    array_accuracy_valid = []
    array_glb_train = []
    array_glb_valid = []
    
    percorso1 = directory+"//"+"modelTrained.json"
    
    with open(percorso1,"r") as file:
        data = json.load(file)
        
    if not( data.get(version) is None):
        obj = data[version]
        if not(obj.get("hyperparametr") is None):
            para = obj["hyperparametr"]
            
            if not(para.get("indexEpoch") is None):
                epoche_fatte = para["indexEpoch"]
                
        if not(obj.get("time") is None):
            tempo = obj["time"]
            
            if not(tempo.get("training") is None):
                tempoTrain = tempo["training"]
    
    percorso2 = directory+"//"+version+"//"+"value_arrays.json"
    with open(percorso2,"r") as file2:
        data2 = json.load(file2)
        
    if not( data2.get("array_loss_train") is None):
        array_loss_train = data2["array_loss_train"]
    
    if not( data2.get("array_loss_valid") is None):
        array_loss_valid = data2["array_loss_valid"]  
    
    if not( data2.get("array_accuracy_train") is None):
        array_accuracy_train = data2["array_accuracy_train"]
        
    if not( data2.get("array_accuracy_valid") is None):
        array_accuracy_valid = data2["array_accuracy_valid"]
    
    
    if not( data2.get("array_glb_train") is None):
        array_glb_train = data2["array_glb_train"]
    
        
    if not( data2.get("array_glb_valid") is None):
        array_glb_valid = data2["array_glb_valid"]                

    print("Indice epoca gia fatta: ",epoche_fatte)
    print("Epoche avanza :",epoche_avanza)
    print("Array loss train ", array_loss_train)
    print("Array loss valid", array_loss_valid)
    
    print("Array accuracy train", array_accuracy_train)
    print("Array accuracy valid", array_accuracy_valid)
    
    print("Array glb train", array_glb_train)
    print("array glb valid ", array_glb_valid)
    
    epochs = epoche_fatte + epoche_avanza + 1
    dizionario_array = {
        "epoche_fatte": epoche_fatte,
        "epoche_avanza": epoche_avanza,
        "l_train": array_loss_train,
        "l_valid": array_loss_valid,
        "a_train": array_accuracy_train,
        "a_valid": array_accuracy_valid,
        "g_train": array_glb_train,
        "g_valid": array_glb_valid,
        "tempoTrain": tempoTrain
    }

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_class(
        directory,
        version,
        model,
        money_train_loader,
        money_val_loader,
        resize,
        batch_size,
        exp_name,
        lr=lr,
        epochs=epochs,
        dizionario=dizionario_array)
    
    print("Time computing", f)
    tempo1 = float(tempoTrain)
    tempo2 = float(f) 
    tempo = tempo1+tempo2
    f =str(tempo) 
    print("last_loss_train",last_loss_train)
    print("last_loss_val",last_loss_val)
    print("last_acc_train",last_acc_train)
    print("last_acc_val",last_acc_val)
    
    hyperparametr = {"indexEpoch":epochs-1,"lr":lr, "momentum" : momentum, "batchSize":batch_size }
    loss = {"lossTrain": last_loss_train, "lossValid":last_loss_val}
    accuracy = {"accuracyTrain":last_acc_train , "accuracyValid":last_acc_val }
    time = {"training": f}
    
    
    writeJsonModelClass(directory,name,version, hyperparametr,resize,batch_size, loss, accuracy ,time)
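The resume logic above rests on two conventions: indexEpoch stores the 0-based index of the last finished epoch, and the previous run's training time is added to the new one before being written back. A tiny worked example of that arithmetic, with illustrative numbers only:

# assumption: indexEpoch in modelTrained.json is 0-based
epoche_fatte = 19        # last completed epoch index read from the JSON
epoche_avanza = 10       # extra epochs requested
epochs = epoche_fatte + epoche_avanza + 1   # -> 30 epochs to train up to

tempoTrain = "3600.5"    # seconds stored by the previous run
f = "1800.2"             # seconds taken by this run
f = str(float(tempoTrain) + float(f))       # -> "5400.7" written back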
    
         
コード例 #11
0
def testing_classificazionePair(directory,path, version,resize,batch_size):
    # directory "Classe
    
    model = torch.load(path)
    controlFileCSV()
    
    controlFileCSV()
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()
    pair_test = dataSetPair.pair_money_test
    pair_money_test_loader = DataLoader(pair_test, batch_size, num_workers=0)
    
    createFolder(directory)
    createFolder(directory+"\\"+version)
    
   
    timeTest,pair_prediction, pair_label  = test_classifierPair(model, pair_money_test_loader)
    
    accuracyTest = accuracy_score(pair_label, pair_prediction)
    print("Test accuracy: %0.4f" % accuracyTest)
    #calculate Precision
    precisionTest = precision_score(pair_label, pair_prediction, average='micro')
    print("Test precision: %0.4f" % precisionTest)
    #calculate Recall
    recallTest = recall_score(pair_label, pair_prediction, average='micro')
    print("Test recall: %0.4f" % recallTest)
    #calculate F1 score
    if recallTest != 0.0 and precisionTest != 0.0:
        scores_testing = f1_score(pair_label, pair_prediction, average='micro')
        print("Test mean F1 score: %0.4f" % scores_testing)
    else:
        scores_testing = 0.000
        print("Test mean F1 score: %0.4f" % scores_testing)

    key=["accuracy","precision","recall","mf1_score","time"]
    entry=["accuracyTest_Pair","precisionTest_Pair","recallTest_Pair","f1_score_Test_Pair","testing_Pair"]
    value=[accuracyTest,precisionTest,recallTest,scores_testing,timeTest]
    addValueJsonModel(directory+"\\modelTrained.json",version, key[0] ,entry[0], value[0])
    addValueJsonModel(directory+"\\modelTrained.json",version, key[1] ,entry[1], value[1])
    addValueJsonModel(directory+"\\modelTrained.json",version, key[2] ,entry[2], value[2])
    addValueJsonModel(directory+"\\modelTrained.json",version, key[3] ,entry[3], value[3])
    addValueJsonModel(directory+"\\modelTrained.json",version, key[4] ,entry[4], value[4])
    
    print("Classification Report")
    print(classification_report(pair_label, pair_prediction))
    
    cm = confusion_matrix(pair_label, pair_prediction)
    print("Matrice di confusione \n",cm)
    print("\n")
    #"--------
    FP = cm.sum(axis=0) - np.diag(cm)  
    FN = cm.sum(axis=1) - np.diag(cm)
    TP = np.diag(cm)
    TN = cm.sum() - (FP + FN + TP)

    FP = FP.astype(float)
    FN = FN.astype(float)
    TP = TP.astype(float)
    TN = TN.astype(float)


    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP/(TP+FN)
    # Specificity or true negative rate
    TNR = TN/(TN+FP) 
    # Precision or positive predictive value
    PPV = TP/(TP+FP)
    # Negative predictive value
    NPV = TN/(TN+FN)
    # Fall out or false positive rate
    FPR = FP/(FP+TN)
    # False negative rate
    FNR = FN/(TP+FN)
    # False discovery rate
    FDR = FP/(TP+FP)
    print("\n")
    print("TNR:",TNR)
    print("FPR:",FPR)
    print("FNR:",FNR)
    print("TPR:",TPR)
    
    #----------------
    

    
    cm = cm / cm.sum(1).reshape(-1, 1)  # reshape turns the row sums into a column vector
    print("\n")
    print("Normalized confusion matrix \n", cm)
    """
    tnr, fpr, fnr, tpr = cm.ravel()
    print("\n")
    print("TNR:",tnr)
    print("FPR:",fpr)
    print("FNR:",fnr)
    print("TPR:",tpr)
    """
    key = "performance_test_Pair"
    entry=["TNR","FPR","FNR","TPR"]
    value=[list(TNR), list(FPR), list(FNR), list(TPR)]
    addValueJsonModel(directory+"\\modelTrained.json",version, key ,entry[0], value[0])
    addValueJsonModel(directory+"\\modelTrained.json",version, key ,entry[1], value[1])
    addValueJsonModel(directory+"\\modelTrained.json",version, key ,entry[2], value[2])
    addValueJsonModel(directory+"\\modelTrained.json",version, key ,entry[3], value[3])
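A tiny worked example of the per-class rates derived above, using a hypothetical 2x2 confusion matrix (rows = true class, columns = predicted class):

import numpy as np

cm = np.array([[50, 10],   # class 0: 50 correct, 10 misclassified as class 1
               [ 5, 35]])  # class 1: 5 misclassified as class 0, 35 correct

TP = np.diag(cm)                 # [50, 35]
FP = cm.sum(axis=0) - TP         # [ 5, 10]
FN = cm.sum(axis=1) - TP         # [10,  5]
TN = cm.sum() - (FP + FN + TP)   # [35, 50]

TPR = TP / (TP + FN)             # [0.8333, 0.8750]
TNR = TN / (TN + FP)             # [0.8750, 0.8333]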
コード例 #12
0
def continue_model_margine_single(directory, filename, version, exp_name, name,
                                  model, lr, epoche_avanza, momentum, batch_size,
                                  resize, decay=None, margin1=2.0, soglia=0.92,
                                  modeLoss="single"):

    createFolder(directory)
    createFolder(directory + "\\" + version)
    createFolder(directory + "\\" + version + "\\" + "Metod2")
    dataSetPair = DataSetPairCreate(resize)
    dataSetPair.controlNormalize()

    pair_train = dataSetPair.pair_money_train
    pair_test = dataSetPair.pair_money_test
    pair_validation = dataSetPair.pair_money_val

    pair_money_train_loader = DataLoader(pair_train,
                                         batch_size=batch_size,
                                         num_workers=0,
                                         shuffle=True)
    pair_money_test_loader = DataLoader(pair_test,
                                        batch_size=batch_size,
                                        num_workers=0)
    pair_money_val_loader = DataLoader(pair_validation,
                                       batch_size=batch_size,
                                       num_workers=0)
        
    epoche_fatte = 0
    tempoTrain = 0.0  # default so the resume still works if "time" is missing from the JSON
    array_loss_train = []
    array_loss_valid = []
    array_accuracy_train = []  # these names are the ones read back and used below
    array_accuracy_valid = []
    array_glb_train = []
    array_glb_valid = []
        
    percorso1 = directory+"//"+"modelTrained.json"
    with open(percorso1,"r") as file:
        data = json.load(file)
        
    if not( data.get(version) is None):
        obj = data[version]
        if not(obj.get("hyperparametr") is None):
            para = obj["hyperparametr"]
            
            if not(para.get("indexEpoch") is None):
                epoche_fatte = para["indexEpoch"]
                
        if not(obj.get("time") is None):
            tempo = obj["time"]
            
            if not(tempo.get("training") is None):
                tempoTrain = tempo["training"]
            
            
            
    
    percorso2 = directory+"//"+version+"//"+"value_arrays.json"
    with open(percorso2,"r") as file2:
        data2 = json.load(file2)
        
    if not( data2.get("array_loss_train") is None):
        array_loss_train = data2["array_loss_train"]
    
    if not( data2.get("array_loss_valid") is None):
        array_loss_valid = data2["array_loss_valid"]  
    
    if not( data2.get("array_accuracy_train") is None):
        array_accuracy_train = data2["array_accuracy_train"]
        
    if not( data2.get("array_accuracy_valid") is None):
        array_accuracy_valid = data2["array_accuracy_valid"]
    
    
    if not( data2.get("array_glb_train") is None):
        array_glb_train = data2["array_glb_train"]
    
        
    if not( data2.get("array_glb_valid") is None):
        array_glb_valid = data2["array_glb_valid"]
        """
    percorso3=  directory+"//"+version+"//"+"Metod2"+"//"+"value_performance.json"
    with open(percorso3,"r") as file3:
        data3 = json.load(file3)
        
    if not( data3.get("array_acc_valid") is None):
        array_acc_valid_2 = data3["array_acc_valid"]
        
    if not( data3.get("array_f1_valid") is None):
        array_f1_valid_2 = data3["array_f1_valid"]
        
    if not( data3.get("array_recall_valid") is None):
        array_recall_valid_2 = data3["array_recall_valid"]
        
    if not( data3.get("array_precision_valid") is None):
        array_precision_valid_2 = data3["array_precision_valid"]
    
    if not( data3.get("array_tp_valid") is None):
        array_tp_valid_2 = data3["array_tp_valid"]
    
    if not( data3.get("array_fp_valid") is None):
        array_fp_valid_2 = data3["array_fp_valid"]
    
    if not( data3.get("gl_step_valid") is None):
        array_glb_valid_2 = data3["gl_step_valid"]
    
    """
        
    
        
    
                
        
            
    print("Indice epoca gia fatta: ",epoche_fatte)
    print("Epoche avanza :",epoche_avanza)
    print("Array loss train ", array_loss_train)
    print("Array loss valid", array_loss_valid)
    
    print("Array accuracy train", array_accuracy_train)
    print("Array accuracy valid", array_accuracy_valid)
    
    print("Array glb train", array_glb_train)
    print("array glb valid ", array_glb_valid)
    # epoche_fatte is the index of the last completed epoch (e.g. 19)
    epochs = epoche_fatte + epoche_avanza + 1
    dizionario_array = {
        "epoche_fatte": epoche_fatte,
        "epoche_avanza": epoche_avanza,
        "l_train": array_loss_train,
        "l_valid": array_loss_valid,
        "a_train": array_accuracy_train,
        "a_valid": array_accuracy_valid,
        "g_train": array_glb_train,
        "g_valid": array_glb_valid,
        "tempoTrain": tempoTrain
    }

    modello, f, last_loss_train, last_loss_val, last_acc_train, last_acc_val = train_siamese_margine(
        directory,
        version,
        model,
        pair_money_train_loader,
        pair_money_val_loader,
        resize,
        batch_size,
        exp_name=exp_name,
        lr=lr,
        epochs=epochs,
        momentum=momentum,
        margin=margin1,
        soglia=soglia,
        logdir="logs",
        decay=decay,
        modeLoss=modeLoss,
        dizionario=dizionario_array)
    
    print("Time computing", f)
    print("last_loss_train",last_loss_train)
    print("last_loss_val",last_loss_val)
    print("last_acc_train",last_acc_train)
    print("last_acc_val",last_acc_val)
    if not decay is None:
        hyperparametr = {
            "indexEpoch": epochs - 1,
            "lr": lr,
            "decay": decay,
            "momentum": momentum,
            "numSampleTrain": len(pair_train),
            "margin1": margin1,
            "soglia": soglia
        }
    else:
        hyperparametr = {
            "indexEpoch": epochs - 1,
            "lr": lr,
            "momentum": momentum,
            "numSampleTrain": len(pair_train),
            "margin1": margin1,
            "soglia": soglia
        }
    
    contrastiveLoss = {"lossTrain": last_loss_train, "lossValid":last_loss_val}
    accuracy = {"accuracyTrain":last_acc_train , "accuracyValid":last_acc_val }
    
    time = {"training": f}
    
    writeJsonModelClass(directory,name,version, hyperparametr,resize, batch_size, contrastiveLoss, accuracy ,time)
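train_siamese_margine itself is not shown in this listing. A plausible single-margin contrastive loss, plus the soglia (threshold) decision rule implied by its arguments, is sketched below; both functions are assumptions for illustration, not the project's code.

import torch
import torch.nn.functional as F

def contrastive_single(out1, out2, label, margin=2.0):
    # classic contrastive loss: label 0 = similar pair, label 1 = dissimilar pair
    d = F.pairwise_distance(out1, out2)
    loss = (1 - label) * d ** 2 + label * torch.clamp(margin - d, min=0.0) ** 2
    return 0.5 * loss.mean()

def predict_pair(out1, out2, soglia=0.92):
    # pairs closer than the threshold are declared similar (0), otherwise dissimilar (1)
    d = F.pairwise_distance(out1, out2)
    return (d > soglia).long()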