def main():
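    # Pixel-wise random forest baseline: each hyperspectral pixel's full spectrum is one sample.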
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument('--dataset', type=str, required=True, \
            choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr"], \
            help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')

    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components', default=None, type=int, help='dimensionality reduction')
    parser.add_argument('--preprocess', default="standard", type=str, help='Preprocessing')
    parser.add_argument('--splitmethod', default="sklearn", type=str, help='Method used to split the dataset')
    parser.add_argument('--random_state', default=None, type=int, 
                    help='The seed of the pseudo random number generator to use when shuffling the data')
    parser.add_argument('--tr_percent', default=0.15, type=float, help='Fraction of samples used for the training set')

    #########################################
    parser.add_argument('--set_parameters', action='store_false',
                        help='Use preset parameters from set_params (pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument('--n_est', default=200, type=int, help='The number of trees in the forest')
    parser.add_argument('--m_s_split', default=2, type=int,
                    help='The minimum number of samples required to split an internal node')
    parser.add_argument('--max_feat', default=40, type=int, 
                    help='The number of features to consider when looking for the best split')
    parser.add_argument('--depth', default=60, type=int, help='The maximum depth of the tree')
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    if args.set_parameters: args = set_params(args)

    pixels, labels, num_class = \
                    mydata.loadData(args.dataset, num_components=args.components, preprocessing=args.preprocess)
    pixels = pixels.reshape(-1, pixels.shape[-1])

    stats = np.ones((args.repeat, num_class+3)) * -1000.0 # OA, AA, K, Aclass
    for pos in range(args.repeat):
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = \
                mydata.load_split_data_fix(args.dataset, pixels)#, rand_state=args.random_state+pos)
        else:
            labels = labels.reshape(-1)
            pixels = pixels[labels!=0]
            labels = labels[labels!=0] - 1
            rstate = args.random_state + pos if args.random_state is not None else None
            x_train, x_test, y_train, y_test = \
                mydata.split_data(pixels, labels, args.tr_percent, rand_state=rstate)
        clf = RandomForestClassifier(\
                    n_estimators=args.n_est, min_samples_split=args.m_s_split, \
                    max_features=args.max_feat, max_depth=args.depth) \
                    .fit(x_train, y_train)
        stats[pos,:] = mymetrics.reports(clf.predict(x_test), y_test)[2]
    print(stats[-1])
def main():
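    # Patch-based classifier: trains a Keras model on windowSize x windowSize image cubes extracted around each pixel.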
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr"],
        help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')
    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components',
                        default=None,
                        type=int,
                        help='dimensionality reduction')
    parser.add_argument('--spatialsize',
                        default=19,
                        type=int,
                        help='window size (spatial patch size)')
    parser.add_argument('--wdecay',
                        default=0.02,
                        type=float,
                        help='apply penalties on layer parameters')
    parser.add_argument('--preprocess',
                        default="standard",
                        type=str,
                        help='Preprocessing')
    parser.add_argument('--splitmethod',
                        default="sklearn",
                        type=str,
                        help='Method used to split the dataset')
    parser.add_argument(
        '--random_state',
        default=None,
        type=int,
        help=
        'The seed of the pseudo random number generator to use when shuffling the data'
    )
    parser.add_argument('--tr_percent',
                        default=0.15,
                        type=float,
                        help='Fraction of samples used for the training set')
    parser.add_argument('--use_val',
                        action='store_true',
                        help='Use validation set')
    parser.add_argument('--val_percent',
                        default=0.1,
                        type=float,
                        help='Fraction of samples used for the validation set')
    parser.add_argument('--verbosetrain',
                        action='store_true',
                        help='Verbose train')
    #########################################
    parser.add_argument('--set_parameters',
                        action='store_false',
                        help='Use preset parameters from set_params '
                        '(pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument(
        '--batch_size',
        default=100,
        type=int,
        help='Number of training examples in one forward/backward pass.')
    parser.add_argument(
        '--epochs',
        default=300,
        type=int,
        help='Number of full training cycles on the training set')
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    if args.set_parameters:
        args = set_params(args)

    pixels, labels, num_class = mydata.loadData(args.dataset,
                                                num_components=args.components,
                                                preprocessing=args.preprocess)
    pixels, labels = mydata.createImageCubes(pixels,
                                             labels,
                                             windowSize=args.spatialsize,
                                             removeZeroLabels=False)
    stats = np.ones(
        (args.repeat, num_class + 3)) * -1000.0  # OA, AA, K, Aclass
    for pos in range(args.repeat):
        rstate = args.random_state + pos if args.random_state is not None else None
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = mydata.load_split_data_fix(
                args.dataset, pixels)  #, rand_state=args.random_state+pos)
        else:
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            x_train, x_test, y_train, y_test = mydata.split_data(
                pixels, labels, args.tr_percent, rand_state=rstate)

        if args.use_val:
            x_val, x_test, y_val, y_test = mydata.split_data(x_test,
                                                             y_test,
                                                             args.val_percent,
                                                             rand_state=rstate)

        inputshape = x_train.shape[1:]
        clf = get_model_compiled(inputshape, num_class, w_decay=args.wdecay)
        if args.use_val:
            valdata = (x_val, keras_to_categorical(y_val, num_class))
        else:
            valdata = (x_test, keras_to_categorical(y_test, num_class))
        clf.fit(x_train,
                keras_to_categorical(y_train, num_class),
                batch_size=args.batch_size,
                epochs=args.epochs,
                verbose=args.verbosetrain,
                validation_data=valdata,
                callbacks=[
                    ModelCheckpoint("/tmp/best_model.h5",
                                    monitor='val_accuracy',
                                    verbose=0,
                                    save_best_only=True)
                ])
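        # Discard the in-memory model and reload the best checkpoint written by ModelCheckpoint before evaluating.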
        del clf
        K.clear_session()
        gc.collect()
        clf = load_model("/tmp/best_model.h5")
        print("PARAMETERS", clf.count_params())
        stats[pos, :] = mymetrics.reports(
            np.argmax(clf.predict(x_test), axis=1), y_test)[2]
    print(args.dataset, list(stats[-1]))
Example 3
def main():
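    # Transfer learning: optionally pre-train a base model on dataset1 (--search_base_model),
    # then fine-tune a pretrained network on dataset2 using only a few labeled samples per class.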
    parser = argparse.ArgumentParser(description='Transfer Learning HSI')
    parser.add_argument('--dataset1', type=str, required=True, \
            choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr"], \
            help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')
    parser.add_argument('--dataset2', type=str, required=True, \
            choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr"], \
            help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')
    parser.add_argument('--arch', type=str, required=True, \
            choices=["CNN1D", "CNN2D", "CNN2D40bands", "CNN3D"], \
            help='architecture (options: CNN1D, CNN2D, CNN2D40bands, CNN3D)')
    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--preprocess',
                        default="standard",
                        type=str,
                        help='Preprocessing')
    parser.add_argument('--splitmethod',
                        default="sklearn",
                        type=str,
                        help='Method used to split the dataset')
    parser.add_argument(
        '--random_state',
        default=None,
        type=int,
        help=
        'The seed of the pseudo random number generator to use when shuffling the data'
    )

    parser.add_argument('--tr_samples',
                        default=2,
                        type=int,
                        help='Number of training samples per class')
    parser.add_argument('--use_val',
                        action='store_true',
                        help='Use validation set')
    parser.add_argument('--val_percent',
                        default=0.1,
                        type=float,
                        help='Fraction of samples used for the validation set')

    parser.add_argument('--use_transfer_learning',
                        action='store_true',
                        help='Use transfer learning')
    parser.add_argument('--search_base_model',
                        action='store_true',
                        help='Search base model')

    parser.add_argument('--lr1',
                        default=1e-3,
                        type=float,
                        help='Learning rate base model')
    parser.add_argument('--lr2',
                        default=8e-4,
                        type=float,
                        help='Learning rate fine model')

    parser.add_argument('--verbosetrain1',
                        action='store_true',
                        help='Verbose train')
    parser.add_argument('--verbosetrain2',
                        action='store_true',
                        help='Verbose train')
    #########################################
    parser.add_argument('--set_parameters',
                        action='store_false',
                        help='Use preset parameters from set_params '
                        '(pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument(
        '--batch_size1',
        default=100,
        type=int,
        help='Number of training examples in one forward/backward pass.')
    parser.add_argument(
        '--epochs1',
        default=50,
        type=int,
        help='Number of full training cycles on the training set')
    parser.add_argument(
        '--batch_size2',
        default=100,
        type=int,
        help='Number of training examples in one forward/backward pass.')
    parser.add_argument(
        '--epochs2',
        default=500,
        type=int,
        help='Number of full training cycles on the training set')
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    if args.set_parameters: args = set_params(args)

    args.components = 1 if "CNN2D" == args.arch else 40
    args.spatialsize = 1 if args.arch == "CNN1D" else 19
    # Class counts per dataset: IP/SV and their disjoint variants have 16 classes,
    # the Pavia-based sets (UP, DUP, DUPr) have 9, and UH (Houston 2013) has 15.
    if args.dataset2 in ["IP", "SV", "DIP", "DIPr"]:
        num_class = 16
    elif args.dataset2 in ["UP", "DUP", "DUPr"]:
        num_class = 9
    else:
        num_class = 15
    stats = np.ones(
        (args.repeat, num_class + 3)) * -1000.0  # OA, AA, K, Aclass
    for pos in range(args.repeat):
        rstate = args.random_state + pos if args.random_state is not None else None
        if args.search_base_model:
            pixels, labels, num_class = \
                            mydata.loadData(args.dataset1, num_components=args.components, preprocessing=args.preprocess)

            if args.arch != "CNN1D":
                pixels, labels = mydata.createImageCubes(
                    pixels,
                    labels,
                    windowSize=args.spatialsize,
                    removeZeroLabels=True)
                if args.arch == "CNN3D":
                    inputshape = (pixels.shape[1], pixels.shape[2],
                                  pixels.shape[3], 1)
                    pixels = pixels.reshape(pixels.shape[0], pixels.shape[1],
                                            pixels.shape[2], pixels.shape[3],
                                            1)
                else:
                    inputshape = (pixels.shape[1], pixels.shape[2],
                                  pixels.shape[3])
            else:
                pixels = pixels.reshape(-1, pixels.shape[-1])
                labels = labels.reshape(-1)
                pixels = pixels[labels != 0]
                labels = labels[labels != 0] - 1
                inputshape = (pixels.shape[-1], 1)
                pixels = pixels.reshape(pixels.shape[0], pixels.shape[1], 1)

            pixels, labels = mydata.random_unison(pixels,
                                                  labels,
                                                  rstate=rstate)
            clf = get_model_compiled(args, inputshape, num_class)
            clf.fit(pixels,
                    keras_to_categorical(labels),
                    batch_size=args.batch_size1,
                    epochs=args.epochs1,
                    verbose=args.verbosetrain1,
                    callbacks=[
                        ModelCheckpoint("/tmp/best_base_model.h5",
                                        monitor='loss',
                                        verbose=0,
                                        save_best_only=True)
                    ])
            del pixels, labels
            del clf
            K.clear_session()
            gc.collect()
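            # The base-model search only trains and checkpoints the source model, then stops here.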
            exit()

        pixels, labels, num_class = \
                        mydata.loadData(args.dataset2, num_components=args.components, preprocessing=args.preprocess)

        if args.arch != "CNN1D":
            pixels, labels = mydata.createImageCubes(
                pixels,
                labels,
                windowSize=args.spatialsize,
                removeZeroLabels=False)
            if args.arch == "CNN3D":
                inputshape = (pixels.shape[1], pixels.shape[2],
                              pixels.shape[3], 1)
                pixels = pixels.reshape(pixels.shape[0], pixels.shape[1],
                                        pixels.shape[2], pixels.shape[3], 1)
            else:
                inputshape = (pixels.shape[1], pixels.shape[2],
                              pixels.shape[3])
        else:
            pixels = pixels.reshape(-1, pixels.shape[-1])
            labels = labels.reshape(-1)
            # Zero labels are handled at the split stage below (filtered out for the random
            # split, or by load_split_data_fix), so do not filter and shift them twice here.
            inputshape = (pixels.shape[-1], 1)
            pixels = pixels.reshape(pixels.shape[0], pixels.shape[1], 1)

        if args.dataset2 in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = \
                mydata.load_split_data_fix(args.dataset2, pixels)
        else:
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            x_train, x_test, y_train, y_test = \
                mydata.split_data(pixels, labels, [args.tr_samples]*num_class, splitdset="custom2", rand_state=rstate)
        if args.use_val:
            x_val, x_test, y_val, y_test = \
                mydata.split_data(x_test, y_test, args.val_percent, rand_state=rstate)
            if args.arch == "CNN1D":
                x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], 1)
            elif args.arch == "CNN3D":
                x_val = x_val.reshape(x_val.shape[0], x_val.shape[1],
                                      x_val.shape[2], x_val.shape[3], 1)
        if args.arch == "CNN1D":
            x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
            x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
        elif args.arch == "CNN3D":
            x_train = x_train.reshape(x_train.shape[0], x_train.shape[1],
                                      x_train.shape[2], x_train.shape[3], 1)
            x_test = x_test.reshape(x_test.shape[0], x_test.shape[1],
                                    x_test.shape[2], x_test.shape[3], 1)
            inputshape = (pixels.shape[1], pixels.shape[2], pixels.shape[3], 1)

        if args.use_val:
            valdata = (x_val, keras_to_categorical(y_val, num_class))
        else:
            valdata = (x_test, keras_to_categorical(y_test, num_class))
        clf = get_pretrained_model_compiled(args, inputshape, num_class)

        clf.fit(x_train,
                keras_to_categorical(y_train),
                batch_size=args.batch_size2,
                epochs=args.epochs2,
                verbose=args.verbosetrain2,
                validation_data=valdata,
                callbacks=[
                    ModelCheckpoint("/tmp/best_model.h5",
                                    monitor='val_accuracy',
                                    verbose=0,
                                    save_best_only=True)
                ])
        del clf
        K.clear_session()
        gc.collect()
        clf = load_model("/tmp/best_model.h5")
        #print("PARAMETERS", clf.count_params())
        stats[pos, :] = mymetrics.reports(
            np.argmax(clf.predict(x_test), axis=1), y_test)[2]
    print(stats[-1])
Example 4
def main():
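    # Fine-tunes standard pretrained CNN backbones (VGG16, ResNet50, InceptionV3, ...) on HSI patches
    # reduced to a few components (3 by default).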
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument('--dataset', type=str, required=True, \
            choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr"], \
            help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')
    parser.add_argument('--arch', type=str, required=True, \
            choices=["VGG16", "VGG19", "RESNET50", "INCEPTIONV3", "DENSENET121", "MOBILENET", "XCEPTION"], \
            help='architecture (options: VGG16, VGG19, RESNET50, INCEPTIONV3, DENSENET121, MOBILENET, XCEPTION)')

    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components',
                        default=3,
                        type=int,
                        help='dimensionality reduction')
    parser.add_argument('--spatialsize',
                        default=31,
                        type=int,
                        help='window size (spatial patch size)')
    parser.add_argument('--lrate',
                        default=1e-4,
                        type=float,
                        help='Learning rate')
    parser.add_argument('--preprocess',
                        default="standard",
                        type=str,
                        help='Preprocessing')
    parser.add_argument('--splitmethod',
                        default="sklearn",
                        type=str,
                        help='Method used to split the dataset')
    parser.add_argument(
        '--random_state',
        default=None,
        type=int,
        help=
        'The seed of the pseudo random number generator to use when shuffling the data'
    )
    parser.add_argument('--tr_percent',
                        default=0.15,
                        type=float,
                        help='Fraction of samples used for the training set')
    parser.add_argument('--use_val',
                        action='store_true',
                        help='Use validation set')
    parser.add_argument('--val_percent',
                        default=0.1,
                        type=float,
                        help='Fraction of samples used for the validation set')
    parser.add_argument('--verbosetrain',
                        action='store_true',
                        help='Verbose train')
    #########################################
    parser.add_argument('--set_parameters',
                        action='store_false',
                        help='Use preset parameters from set_params '
                        '(pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument(
        '--batch_size',
        default=100,
        type=int,
        help='Number of training examples in one forward/backward pass.')
    parser.add_argument(
        '--epochs',
        default=200,
        type=int,
        help='Number of full training cycles on the training set')
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    if args.set_parameters: args = set_params(args)

    pixels, labels, num_class = \
                    mydata.loadData(args.dataset, num_components=args.components, preprocessing=args.preprocess)
    pixels, labels = mydata.createImageCubes(pixels,
                                             labels,
                                             windowSize=args.spatialsize,
                                             removeZeroLabels=False)
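    # Resize each patch by one pixel per side (e.g. 31 -> 32), presumably so the spatial size
    # downsamples cleanly through the pretrained backbones.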
    pixels = np.array([
        cv2.resize(a, (a.shape[0] + 1, a.shape[1] + 1),
                   interpolation=cv2.INTER_CUBIC) for a in pixels
    ])

    stats = np.ones(
        (args.repeat, num_class + 3)) * -1000.0  # OA, AA, K, Aclass
    for pos in range(args.repeat):
        rstate = args.random_state + pos if args.random_state is not None else None
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = \
                mydata.load_split_data_fix(args.dataset, pixels)#, rand_state=args.random_state+pos)
        else:
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            x_train, x_test, y_train, y_test = \
                mydata.split_data(pixels, labels, args.tr_percent, rand_state=rstate)

        if args.use_val:
            x_val, x_test, y_val, y_test = \
                mydata.split_data(x_test, y_test, args.val_percent, rand_state=rstate)

        base_model, modlrate = get_model_pretrain(args.arch)
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(256, activation='relu')(x)
        x = Dropout(0.5)(x)
        # Use num_class so the output layer matches the one-hot targets
        # (labels may still contain the background class 0 at this point).
        predictions = Dense(num_class,
                            activation='softmax',
                            name='predictions')(x)
        clf = Model(base_model.input, predictions)

        if args.use_val:
            valdata = (x_val, keras_to_categorical(y_val, num_class))
        else:
            valdata = (x_test, keras_to_categorical(y_test, num_class))
        #for layer in clf.layers: layer.trainable = True
        clf.compile(loss=categorical_crossentropy,
                    optimizer=Adam(lr=args.lrate),
                    metrics=['accuracy'])
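        # Stage 1: a short 5-epoch fit of the full network, checkpointing on validation loss.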
        clf.fit(x_train,
                keras_to_categorical(y_train, num_class),
                batch_size=args.batch_size,
                epochs=5,
                verbose=args.verbosetrain,
                validation_data=valdata,
                callbacks=[
                    ModelCheckpoint("/tmp/best_model.h5",
                                    monitor='val_loss',
                                    verbose=0,
                                    save_best_only=True)
                ])
        del clf
        K.clear_session()
        gc.collect()

        clf = load_model("/tmp/best_model.h5")
        # base_model belongs to the session that was just cleared, so freeze the layers of the
        # reloaded model instead, leaving only the new head (last three layers) trainable.
        for layer in clf.layers[:-3]:
            layer.trainable = False
        clf.compile(loss=categorical_crossentropy,
                    optimizer=Adam(lr=args.lrate * modlrate),
                    metrics=['accuracy'])
        clf.fit(x_train,
                keras_to_categorical(y_train, num_class),
                batch_size=args.batch_size,
                epochs=50,
                verbose=args.verbosetrain,
                validation_data=valdata,
                callbacks=[
                    ModelCheckpoint("/tmp/best_model.h5",
                                    monitor='val_accuracy',
                                    verbose=0,
                                    save_best_only=True)
                ])
        del clf
        K.clear_session()
        gc.collect()
        clf = load_model("/tmp/best_model.h5")
        #print("PARAMETERS", clf.count_params())
        stats[pos, :] = mymetrics.reports(
            np.argmax(clf.predict(x_test), axis=1), y_test)[2]
    print(args.dataset, args.arch, args.tr_percent, list(stats[-1]))
def main():
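    # Multinomial logistic-regression baseline on raw spectral pixels.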
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument('--dataset', type=str, required=True, \
            choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr"], \
            help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')

    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components',
                        default=None,
                        type=int,
                        help='dimensionality reduction')
    parser.add_argument('--preprocess',
                        default="standard",
                        type=str,
                        help='Preprocessing')
    parser.add_argument('--splitmethod',
                        default="sklearn",
                        type=str,
                        help='Method used to split the dataset')
    parser.add_argument(
        '--random_state',
        default=None,
        type=int,
        help=
        'The seed of the pseudo random number generator to use when shuffling the data'
    )
    parser.add_argument('--tr_percent',
                        default=0.15,
                        type=float,
                        help='Fraction of samples used for the training set')
    #########################################
    parser.add_argument('--set_parameters',
                        action='store_false',
                        help='Use preset parameters from set_params '
                        '(pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument('--C',
                        default=1,
                        type=int,
                        help='Inverse of regularization strength')
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}
    if args.set_parameters: args = set_params(args)

    pixels, labels, num_class = \
                    mydata.loadData(args.dataset, num_components=args.components, preprocessing=args.preprocess)
    pixels = pixels.reshape(-1, pixels.shape[-1])

    stats = np.ones(
        (args.repeat, num_class + 3)) * -1000.0  # OA, AA, K, Aclass
    for pos in range(args.repeat):
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = \
                mydata.load_split_data_fix(args.dataset, pixels)#, rand_state=args.random_state+pos)
        else:
            labels = labels.reshape(-1)
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            rstate = args.random_state + pos if args.random_state is not None else None
            x_train, x_test, y_train, y_test = \
                mydata.split_data(pixels, labels, args.tr_percent, rand_state=rstate)
        clf = LogisticRegression(penalty='l2', dual=False, tol=1e-20, C=args.C, \
                        solver='lbfgs', multi_class='multinomial', max_iter=5000,\
                        fit_intercept=True, n_jobs=-1)\
                        .fit(x_train, y_train)
        stats[pos, :] = np.round(
            mymetrics.reports(clf.predict(x_test), y_test)[2], 2)
    print(stats[-1])
def main():
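    # RBF-kernel SVM baseline on spectral pixels; records training/testing time and writes a text report per run.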
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr", "KSC"],
        help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr, KSC)')
    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components',
                        default=None,
                        type=int,
                        help='dimensionality reduction')
    parser.add_argument('--preprocess',
                        default="minmax",
                        type=str,
                        help='Preprocessing')
    parser.add_argument('--splitmethod',
                        default="sklearn",
                        type=str,
                        help='Method used to split the dataset')
    parser.add_argument(
        '--random_state',
        default=None,
        type=int,
        help=
        'The seed of the pseudo random number generator to use when shuffling the data'
    )
    parser.add_argument('--tr_percent',
                        default=0.15,
                        type=float,
                        help='Fraction of samples used for the training set')
    #########################################
    parser.add_argument('--set_parameters',
                        action='store_false',
                        help='Use preset parameters from set_params '
                        '(pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument('--C',
                        default=1,
                        type=int,
                        help='Inverse of regularization strength')
    parser.add_argument(
        '--g',
        default=1,
        type=float,
        help="Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.")
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    if args.set_parameters:
        args = set_params(args)

    pixels, labels, num_class = mydata.loadData(args.dataset,
                                                num_components=args.components,
                                                preprocessing=args.preprocess)
    pixels = pixels.reshape(-1, pixels.shape[-1])
    pixels_bak = np.copy(pixels)
    labels_bak = np.copy(labels)

    KAPPA = []
    OA = []
    AA = []
    TRAINING_TIME = []
    TESTING_TIME = []
    ELEMENT_ACC = np.zeros((args.repeat, num_class))
    for pos in range(args.repeat):
        pixels = pixels_bak
        labels = labels_bak
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = mydata.load_split_data_fix(
                args.dataset, pixels)  #, rand_state=args.random_state+pos)
        else:
            labels = labels.reshape(-1)
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            rstate = args.random_state + pos if args.random_state is not None else None
            x_train, x_test, y_train, y_test = mydata.split_data(
                pixels, labels, args.tr_percent, rand_state=rstate)
        tic1 = time()
        clf = SVC(gamma=args.g, C=args.C, tol=1e-7).fit(x_train, y_train)
        toc1 = time()
        tic2 = time()
        _, _, overall_acc, average_acc, kappa, each_acc = mymetrics.reports(
            clf.predict(x_test), y_test)
        toc2 = time()

        KAPPA.append(kappa)
        OA.append(overall_acc)
        AA.append(average_acc)
        TRAINING_TIME.append(toc1 - tic1)
        TESTING_TIME.append(toc2 - tic2)
        ELEMENT_ACC[pos, :] = each_acc

    print(OA, AA, KAPPA)
    save_report.record_output(
        save_report.args_to_text(vars(args)), OA, AA, KAPPA, ELEMENT_ACC,
        TRAINING_TIME, TESTING_TIME, './report/' + 'svm_' + str(args.dataset) +
        '_' + str(args.tr_percent) + '.txt')
Example 7
def main():
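    # Recurrent spectral classifier (RNN/GRU/LSTM): each pixel's spectrum is reshaped to (n_bands, 1)
    # and treated as a sequence.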
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument('--dataset', type=str, required=True, \
            choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr"], \
            help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr)')
    parser.add_argument('--type_recurrent', type=str, required=True, \
            choices=["RNN", "GRU", "LSTM"], \
            help='function (options: "RNN", "GRU", "LSTM")')
    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components',
                        default=None,
                        type=int,
                        help='dimensionality reduction')
    parser.add_argument('--preprocess',
                        default="standard",
                        type=str,
                        help='Preprocessing')
    parser.add_argument('--splitmethod',
                        default="sklearn",
                        type=str,
                        help='Method used to split the dataset')
    parser.add_argument(
        '--random_state',
        default=None,
        type=int,
        help=
        'The seed of the pseudo random number generator to use when shuffling the data'
    )
    parser.add_argument('--tr_percent',
                        default=0.15,
                        type=float,
                        help='Fraction of samples used for the training set')
    parser.add_argument('--use_val',
                        action='store_true',
                        help='Use validation set')
    parser.add_argument('--val_percent',
                        default=0.1,
                        type=float,
                        help='Fraction of samples used for the validation set')
    parser.add_argument('--verbosetrain',
                        action='store_true',
                        help='Verbose train')
    #########################################
    parser.add_argument('--set_parameters',
                        action='store_false',
                        help='Use preset parameters from set_params '
                        '(pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument(
        '--batch_size',
        default=100,
        type=int,
        help='Number of training examples in one forward/backward pass.')
    parser.add_argument(
        '--epochs',
        default=10,
        type=int,
        help='Number of full training cycles on the training set')
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    if args.set_parameters: args = set_params(args)

    pixels, labels, num_class = \
                    mydata.loadData(args.dataset, num_components=args.components, preprocessing=args.preprocess)
    pixels = pixels.reshape(-1, pixels.shape[-1])

    stats = np.ones(
        (args.repeat, num_class + 3)) * -1000.0  # OA, AA, K, Aclass
    for pos in range(args.repeat):
        rstate = args.random_state + pos if args.random_state is not None else None
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = \
                mydata.load_split_data_fix(args.dataset, pixels)#, rand_state=args.random_state+pos)
        else:
            labels = labels.reshape(-1)
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            x_train, x_test, y_train, y_test = \
                mydata.split_data(pixels, labels, args.tr_percent, rand_state=rstate)

        if args.use_val:
            x_val, x_test, y_val, y_test = \
                mydata.split_data(x_test, y_test, args.val_percent, rand_state=rstate)
            x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], 1)
        x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
        x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)

        n_bands, sequences = x_train.shape[1:]
        clf = get_model_compiled(n_bands, sequences, num_class,
                                 args.type_recurrent)
        if args.use_val:
            valdata = (x_val, keras_to_categorical(y_val, num_class))
        else:
            valdata = (x_test, keras_to_categorical(y_test, num_class))

        # checkpoint
        filepath = "../temp/best_model.h5"
        checkpoint = ModelCheckpoint(filepath,
                                     monitor='val_acc',
                                     verbose=0,
                                     save_best_only=True,
                                     mode='max',
                                     period=1)
        callbacks_list = [checkpoint]
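        # Note: older Keras logs the metric as 'val_acc', while TF2 / recent tf.keras logs 'val_accuracy';
        # the monitored key must match the installed version.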
        clf.fit(x_train,
                keras_to_categorical(y_train, num_class),
                batch_size=args.batch_size,
                epochs=args.epochs,
                verbose=args.verbosetrain,
                validation_data=valdata,
                callbacks=callbacks_list)
        del clf
        K.clear_session()
        gc.collect()
        clf = load_model("../temp/best_model.h5")
        print("PARAMETERS", clf.count_params())
        stats[pos, :] = mymetrics.reports(
            np.argmax(clf.predict(x_test), axis=1), y_test)[2]
    print(args.dataset, args.type_recurrent, list(stats[-1]))
def main():
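    # Random-forest counterpart of the SVM report script: records training/testing time and writes a text report per run.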
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr", "KSC"],
        help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr, KSC)')

    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components',
                        default=None,
                        type=int,
                        help='dimensionality reduction')
    parser.add_argument('--preprocess',
                        default="standard",
                        type=str,
                        help='Preprocessing')
    parser.add_argument('--splitmethod',
                        default="sklearn",
                        type=str,
                        help='Method used to split the dataset')
    parser.add_argument(
        '--random_state',
        default=None,
        type=int,
        help=
        'The seed of the pseudo random number generator to use when shuffling the data'
    )
    parser.add_argument('--tr_percent',
                        default=0.15,
                        type=float,
                        help='Fraction of samples used for the training set')

    #########################################
    parser.add_argument('--set_parameters',
                        action='store_false',
                        help='Use preset parameters from set_params '
                        '(pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument('--n_est',
                        default=200,
                        type=int,
                        help='The number of trees in the forest')
    parser.add_argument(
        '--m_s_split',
        default=2,
        type=int,
        help='The minimum number of samples required to split an internal node'
    )
    parser.add_argument(
        '--max_feat',
        default=40,
        type=int,
        help=
        'The number of features to consider when looking for the best split')
    parser.add_argument('--depth',
                        default=60,
                        type=int,
                        help='The maximum depth of the tree')
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    if args.set_parameters:
        args = set_params(args)

    pixels, labels, num_class = mydata.loadData(args.dataset,
                                                num_components=args.components,
                                                preprocessing=args.preprocess)
    pixels = pixels.reshape(-1, pixels.shape[-1])
    pixels_bak = np.copy(pixels)
    labels_bak = np.copy(labels)

    KAPPA = []
    OA = []
    AA = []
    TRAINING_TIME = []
    TESTING_TIME = []
    ELEMENT_ACC = np.zeros((args.repeat, num_class))
    for pos in range(args.repeat):
        pixels = pixels_bak
        labels = labels_bak
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = mydata.load_split_data_fix(
                args.dataset, pixels)  #, rand_state=args.random_state+pos)
        else:
            labels = labels.reshape(-1)
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            rstate = args.random_state + pos if args.random_state is not None else None
            x_train, x_test, y_train, y_test = mydata.split_data(
                pixels, labels, args.tr_percent, rand_state=rstate)
        tic1 = time()
        clf = RandomForestClassifier(n_estimators=args.n_est,
                                     min_samples_split=args.m_s_split,
                                     max_features=args.max_feat,
                                     max_depth=args.depth).fit(
                                         x_train, y_train)
        toc1 = time()

        tic2 = time()
        _, _, overall_acc, average_acc, kappa, each_acc = mymetrics.reports(
            clf.predict(x_test), y_test)
        toc2 = time()

        KAPPA.append(kappa)
        OA.append(overall_acc)
        AA.append(average_acc)
        TRAINING_TIME.append(toc1 - tic1)
        TESTING_TIME.append(toc2 - tic2)
        ELEMENT_ACC[pos, :] = each_acc

    print(OA, AA, KAPPA)
    save_report.record_output(
        save_report.args_to_text(vars(args)), OA, AA, KAPPA, ELEMENT_ACC,
        TRAINING_TIME, TESTING_TIME, './report/' + 'random_forest_' +
        args.dataset + '_' + str(args.tr_percent) + '.txt')
Example 9
def main():
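    # Recurrent (RNN/GRU/LSTM) variant that also records training/testing time and writes a text report,
    # like the SVM and random-forest report scripts above.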
    parser = argparse.ArgumentParser(description='Algorithms traditional ML')
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        choices=["IP", "UP", "SV", "UH", "DIP", "DUP", "DIPr", "DUPr", "KSC"],
        help='dataset (options: IP, UP, SV, UH, DIP, DUP, DIPr, DUPr, KSC)')
    parser.add_argument('--type_recurrent',
                        type=str,
                        required=True,
                        choices=["RNN", "GRU", "LSTM"],
                        help='function (options: "RNN", "GRU", "LSTM")')
    parser.add_argument('--repeat', default=1, type=int, help='Number of runs')
    parser.add_argument('--components',
                        default=None,
                        type=int,
                        help='dimensionality reduction')
    parser.add_argument('--preprocess',
                        default="standard",
                        type=str,
                        help='Preprocessing')
    parser.add_argument('--splitmethod',
                        default="sklearn",
                        type=str,
                        help='Method used to split the dataset')
    parser.add_argument(
        '--random_state',
        default=None,
        type=int,
        help=
        'The seed of the pseudo random number generator to use when shuffling the data'
    )
    parser.add_argument('--tr_percent',
                        default=0.15,
                        type=float,
                        help='Fraction of samples used for the training set')
    parser.add_argument('--use_val',
                        action='store_true',
                        help='Use validation set')
    parser.add_argument('--val_percent',
                        default=0.1,
                        type=float,
                        help='Fraction of samples used for the validation set')
    parser.add_argument('--verbosetrain',
                        action='store_false',
                        help='Verbose train')
    #########################################
    parser.add_argument('--set_parameters',
                        action='store_false',
                        help='Use preset parameters from set_params '
                        '(pass this flag to keep the CLI values instead)')
    ############## CHANGE PARAMS ############
    parser.add_argument(
        '--batch_size',
        default=100,
        type=int,
        help='Number of training examples in one forward/backward pass.')
    parser.add_argument(
        '--epochs',
        default=150,
        type=int,
        help='Number of full training cycles on the training set')
    #########################################

    args = parser.parse_args()
    state = {k: v for k, v in args._get_kwargs()}

    if args.set_parameters:
        args = set_params(args)

    pixels, labels, num_class = mydata.loadData(args.dataset,
                                                num_components=args.components,
                                                preprocessing=args.preprocess)
    pixels = pixels.reshape(-1, pixels.shape[-1])
    pixels_bak = np.copy(pixels)
    labels_bak = np.copy(labels)

    KAPPA = []
    OA = []
    AA = []
    TRAINING_TIME = []
    TESTING_TIME = []
    ELEMENT_ACC = np.zeros((args.repeat, num_class))
    for pos in range(args.repeat):
        pixels = pixels_bak
        labels = labels_bak
        rstate = args.random_state + pos if args.random_state is not None else None
        if args.dataset in ["UH", "DIP", "DUP", "DIPr", "DUPr"]:
            x_train, x_test, y_train, y_test = \
                mydata.load_split_data_fix(args.dataset, pixels)#, rand_state=args.random_state+pos)
        else:
            labels = labels.reshape(-1)
            pixels = pixels[labels != 0]
            labels = labels[labels != 0] - 1
            x_train, x_test, y_train, y_test = mydata.split_data(
                pixels, labels, args.tr_percent, rand_state=rstate)

        if args.use_val:
            x_val, x_test, y_val, y_test = mydata.split_data(x_test,
                                                             y_test,
                                                             args.val_percent,
                                                             rand_state=rstate)
            x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], 1)
        x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
        x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)

        n_bands, sequences = x_train.shape[1:]
        clf = get_model_compiled(n_bands, sequences, num_class,
                                 args.type_recurrent)
        if args.use_val:
            valdata = (x_val, keras_to_categorical(y_val, num_class))
        else:
            valdata = (x_test, keras_to_categorical(y_test, num_class))
        tic1 = time()
        clf.fit(x_train,
                keras_to_categorical(y_train, num_class),
                batch_size=args.batch_size,
                epochs=args.epochs,
                verbose=args.verbosetrain,
                validation_data=valdata,
                callbacks=[
                    ModelCheckpoint("/tmp/best_model.h5",
                                    monitor='val_accuracy',
                                    verbose=0,
                                    save_best_only=True)
                ])
        del clf
        K.clear_session()
        gc.collect()
        clf = load_model("/tmp/best_model.h5")
        print("PARAMETERS", clf.count_params())
        toc1 = time()

        tic2 = time()
        _, _, overall_acc, average_acc, kappa, each_acc = mymetrics.reports(
            np.argmax(clf.predict(x_test), axis=1), y_test)
        toc2 = time()
        KAPPA.append(kappa)
        OA.append(overall_acc)
        AA.append(average_acc)
        TRAINING_TIME.append(toc1 - tic1)
        TESTING_TIME.append(toc2 - tic2)
        ELEMENT_ACC[pos, :] = each_acc

    print(OA, AA, KAPPA)
    save_report.record_output(
        save_report.args_to_text(vars(args)), OA, AA, KAPPA, ELEMENT_ACC,
        TRAINING_TIME, TESTING_TIME, './report/' + args.type_recurrent + '_' +
        args.dataset + '_' + str(args.tr_percent) + '.txt')
Example 10
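# Aggregates 10 saved prediction files per configuration and computes the mean and standard deviation
# of OA, AA, and kappa; several names (add_data1, indian_labels0, all_mean0) come from earlier in the full script.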
all_mean2 = np.zeros([6,14])
oak = np.zeros([6,3])

num1_class = 16
num2_class = 9
num3_class = 11


for j in range(6):
    if j == 0: 
        stats = np.ones((10, num1_class+3)) * -1000.0 # OA, AA, K, Aclass
        for i in range(10):
            pres = np.loadtxt("./Prob_MRF/"+add_data1+"dual_outcome/probability/"+str(i)+"labels_pred.txt")
            pres = pres.astype(np.float32)
            pres = pres.reshape(-1)
            stats[i,:] = mymetrics.reports(pres, indian_labels0)[2]
        oa_std = np.std(stats[:,0],ddof=1)
        aa_std = np.std(stats[:,1],ddof=1)
        kappa_std = np.std(stats[:,2],ddof=1)
        all_mean0[j,:] = np.mean(stats,axis=0)
        oak[j,0]=oa_std
        oak[j,1]=aa_std
        oak[j,2]=kappa_std

    if j == 1: 
        stats = np.ones((10, num1_class+3)) * -1000.0 # OA, AA, K, Aclass
        for i in range(10):
            pres = np.loadtxt("./Prob_MRF/"+add_data2+"dual_outcome/probability/"+str(i)+"labels_pred.txt")
            pres = pres.astype(np.float32)
            pres = pres.reshape(-1)
            stats[i,:] = mymetrics.reports(pres, indian_labels)[2]