# Example no. 1
def deep_learning_experiment_vector(param, train, test, label_info):
    """Run the 3-modality custom-training-loop experiment.

    For each repeat, builds a multi-input model via
    ``model_compactor.model_setting``, trains it with a manual
    ``tf.GradientTape`` loop on the fused output head, logs metrics and
    similarity-matrix images to TensorBoard, evaluates on the test split,
    and stores the result through ``ds``.

    Args:
        param: experiment configuration (method, model_name, datatype,
            epochs, batch_size, nb_combine, collect, ...). ``param.nb_modal``
            is forced to 3 here.
        train, test: per-repeat dicts holding "data_0".."data_2" and "tag".
        label_info: ``[nb_class, nb_people]``.

    Raises:
        ValueError: if ``param.method`` matches no known selection mode.
    """
    nb_class = label_info[0]
    nb_people = label_info[1]
    param.nb_modal = 3

    if param.method == method_select['people']:
        nb_repeat = nb_people
    elif param.method in method_select['repeat']:
        nb_repeat = 20
    elif param.method in method_select["CrossValidation"]:
        nb_repeat = param.collect["CrossValidation"] * 5
    else:
        # BUG FIX: nb_repeat was left unbound for unknown methods,
        # raising NameError at the loop below; fail loudly instead.
        raise ValueError(f"unknown experiment method: {param.method}")

    for repeat in range(nb_repeat):

        print(f"{dt()} :: {repeat+1}/{nb_repeat} experiment progress")

        tartr = train[repeat]
        tarte = test[repeat]

        tr_data = [tartr["data_0"], tartr["data_1"], tartr["data_2"]]
        te_data = [tarte["data_0"], tarte["data_1"], tarte["data_2"]]
        if param.datatype == "type":
            # "type" tags are 1-based; shift to 0-based class indices.
            tr_label = tartr["tag"] - 1
            te_label = tartr["tag"] - 1 if False else tarte["tag"] - 1
        elif param.datatype == "disease":
            tr_label = tartr["tag"]
            te_label = tarte["tag"]
        nb_class = label_info[0]

        cat_tr = preprocessing.to_categorical(tr_label, nb_class)
        cat_te = preprocessing.to_categorical(te_label, nb_class)

        model = model_compactor.model_setting(param, train[repeat],
                                              test[repeat],
                                              [nb_class, nb_people])
        print(f"{dt()} :: MODEL={param.model_name}, METHOD={param.method}")

        # Pre-slice the training set into fixed-size mini-batches, one list
        # per modality plus one for the one-hot labels. The trailing partial
        # batch (< batch_size samples) is dropped, as in the original code.
        x_train1 = list()
        x_train2 = list()
        x_train3 = list()
        y_train = list()

        nb_batch = len(tr_data[0]) // param.batch_size
        print(f"total batch : {nb_batch}")
        for i in range(nb_batch):
            lo = i * param.batch_size
            hi = lo + param.batch_size
            x_train1.append(tr_data[0][lo:hi])
            x_train2.append(tr_data[1][lo:hi])
            x_train3.append(tr_data[2][lo:hi])
            y_train.append(cat_tr[lo:hi])

        model.summary()
        # BUG FIX: `lr` is a deprecated alias; use `learning_rate`.
        optimizer = tf.optimizers.Adam(learning_rate=0.0001)
        fin_loss_object = tf.keras.losses.CategoricalCrossentropy()

        train_loss = tf.keras.metrics.Mean(name='train_loss')
        train_accuracy = tf.keras.metrics.CategoricalAccuracy(
            name='train_accuracy')

        # BUG FIX: the original created a new summary file writer on every
        # training step, leaking writers/event files; create one per repeat.
        logdir = f"../Log/similarity_matrix/{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}"
        file_writer = tf.summary.create_file_writer(logdir)

        # NOTE: the original wrapped this training phase in a `while True:`
        # with no break, so the function never terminated; run it once.
        for epoch in range(param.epochs):

            for step, (x_batch1, x_batch2, x_batch3, y_batch) in enumerate(
                    zip(x_train1, x_train2, x_train3, y_train)):

                with tf.GradientTape() as tape:
                    logits = model([x_batch1, x_batch2, x_batch3])
                    # logits[6] is the merged/fused prediction head; it is
                    # the only output trained on. (The original also built a
                    # weighted sum via a malformed 3-argument tf.math.add and
                    # immediately overwrote it — dead code, removed.)
                    true_loss = fin_loss_object(y_batch, logits[6])

                grads = tape.gradient(true_loss, model.trainable_variables)
                # Skip variables with no gradient (heads not on this path).
                optimizer.apply_gradients(
                    (grad, var)
                    for grad, var in zip(grads, model.trainable_variables)
                    if grad is not None)

                tr_loss = train_loss(true_loss)
                tr_acc1 = train_accuracy(y_batch, logits[0])
                tr_acc2 = train_accuracy(y_batch, logits[1])
                tr_acc3 = train_accuracy(y_batch, logits[2])
                tr_acc4 = train_accuracy(y_batch, logits[6])

                # assumes logits[3] flattens to 128x128 similarity maps —
                # TODO confirm against the model definition.
                sim_images = np.reshape(logits[3], (-1, 128, 128, 1))
                with file_writer.as_default():
                    tf.summary.scalar("train_loss", tr_loss, step=epoch)
                    tf.summary.scalar("train_acc", tr_acc4, step=epoch)
                    tf.summary.image("Similarity Matrix",
                                     sim_images,
                                     step=epoch,
                                     max_outputs=12)

                print(
                    f'[step : {step}/{len(x_train1)}] [epochs : {epoch}/{param.epochs}]'
                    f'train loss : {tr_loss}, domain 1-3_accuracy : {tr_acc1*100}, {tr_acc2*100}, {tr_acc3*100}'
                )
                print(
                    f'train merge acc : {tr_acc4*100} test loss : not implemented...'
                )

        # BUG FIX: model_score was never assigned (the evaluate call had
        # been commented out), raising NameError below; evaluate with the
        # test labels so loss/accuracy can be recorded.
        model_score = model.evaluate(x=te_data, y=cat_te, verbose=0)

        if repeat == 0:
            # First repeat carries the full experiment header row.
            tracking = [
                dt(), param.method, param.model_name, param.nb_combine, repeat,
                model_score[0], model_score[1]
            ]
            ds.stock_result(tracking)
        else:
            tracking = [dt(), repeat, model_score[0], model_score[1]]
            ds.stock_result(tracking)

        ds.save_result_obo(param, tracking)

        # Drop large references before the next repeat and reset the TF
        # graph. (BUG FIX: `te_date` was a typo leaking te_data; the
        # original `sess.close()` referenced an undefined `sess` — removed.)
        model_score = None
        tracking = None
        tr_data = None
        te_data = None
        tf.keras.backend.clear_session()
# Example no. 2
def deep_learning_experiment_ensemble(param, train, test, label_info):
    """Train two models per repeat and evaluate their average-prediction ensemble.

    For each repeat, fits ``model1`` (CNN) and ``model2`` (LSTM) from
    ``model_compactor.model_setting`` on the same multi-modal data, then
    scores each model individually and their averaged softmax predictions,
    storing all three result rows through ``ds``.

    Args:
        param: experiment configuration (method, model_name, datatype,
            nb_modal, epochs, batch_size, nb_combine, collect, ...).
        train, test: per-repeat dicts holding "data_0".."data_{nb_modal-1}"
            and "tag".
        label_info: ``[nb_class, nb_people]``.

    Raises:
        ValueError: if ``param.method`` matches no known selection mode.
    """
    nb_class = label_info[0]
    nb_people = label_info[1]

    if param.method == method_select['people']:
        nb_repeat = nb_people
    elif param.method in method_select['repeat']:
        nb_repeat = 20
    elif param.method in method_select["CrossValidation"]:
        nb_repeat = param.collect["CrossValidation"] * 5
    else:
        # BUG FIX: nb_repeat was left unbound for unknown methods,
        # raising NameError at the loop below; fail loudly instead.
        raise ValueError(f"unknown experiment method: {param.method}")

    for repeat in range(nb_repeat):

        print(f"{dt()} :: {repeat+1}/{nb_repeat} experiment progress")

        tartr = train[repeat]
        tarte = test[repeat]

        tr_data = list()
        te_data = list()
        for idx in range(param.nb_modal):
            tr_data.append(tartr[f"data_{idx}"])
            te_data.append(tarte[f"data_{idx}"])
        # Both datatype branches used the tag unchanged; kept as one path.
        # (Unlike the "vector" experiment, "type" tags are NOT shifted here.)
        tr_label = tartr["tag"]
        te_label = tarte["tag"]
        nb_class = label_info[0]

        cat_tr = preprocessing.to_categorical(tr_label, nb_class)
        cat_te = preprocessing.to_categorical(te_label, nb_class)

        model1, model2 = model_compactor.model_setting(param, train[repeat],
                                                       test[repeat],
                                                       [nb_class, nb_people])
        print(f"{dt()} :: MODEL={param.model_name}, METHOD={param.method}")

        log_dir = f"../Log/{param.model_name}_{param.method}"

        tb_hist = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                 histogram_freq=0,
                                                 write_graph=True,
                                                 write_images=True)

        print(
            f"{dt()} :: Train Sample : {len(tr_data[0])}, Test Sample : {len(te_data[0])}"
        )

        model1.summary()
        model1.fit(x=tr_data,
                   y=cat_tr,
                   epochs=param.epochs,
                   batch_size=param.batch_size,
                   validation_data=(te_data, cat_te),
                   verbose=2,
                   callbacks=[tb_hist])

        model2.summary()
        model2.fit(x=tr_data,
                   y=cat_tr,
                   epochs=param.epochs,
                   batch_size=param.batch_size,
                   validation_data=(te_data, cat_te),
                   verbose=2,
                   callbacks=[tb_hist])

        model1_score = model1.evaluate(x=te_data, y=cat_te, verbose=0)
        print(f"{dt()} :: Test Loss :{model1_score[0]}")
        print(f"{dt()} :: Test Accuracy :{model1_score[1]}")

        model2_score = model2.evaluate(x=te_data, y=cat_te, verbose=0)
        print(f"{dt()} :: Test Loss :{model2_score[0]}")
        print(f"{dt()} :: Test Accuracy :{model2_score[1]}")

        def predict(model1, model2, data, categorical_y):
            """Score each model and their averaged-softmax ensemble on *data*.

            Returns (model1_accuracy, model2_accuracy, ensemble_accuracy),
            each a shape-(1,) array in [0, 1].
            """
            model1_predict = model1.predict(data)
            model2_predict = model2.predict(data)

            nb_sample = len(data[0])
            # counter rows: [model1 correct, model2 correct, ensemble correct]
            counter = np.zeros((3, 1), dtype=int)
            for i in range(nb_sample):
                average_predictions = (model1_predict[i] +
                                       model2_predict[i]) / 2
                true_y = np.argmax(categorical_y[i])

                if np.argmax(model1_predict[i]) == true_y: counter[0] += 1
                if np.argmax(model2_predict[i]) == true_y: counter[1] += 1
                if np.argmax(average_predictions) == true_y: counter[2] += 1

            # BUG FIX: the original divided by len(tr_data[0]) (captured from
            # the enclosing scope) even when scoring test data, giving wrong
            # test accuracies; divide by the evaluated set's own size.
            model1_accuracy = counter[0] / nb_sample
            model2_accuracy = counter[1] / nb_sample
            ens_model_avg = counter[2] / nb_sample
            print(f"{dt()} :: Ensemble Model 1 Accuracy : {model1_accuracy}")
            print(f"{dt()} :: Ensemble Model 2 Accuracy : {model2_accuracy}")
            print(f"{dt()} :: Ensemble AVG Accuracy : {ens_model_avg}")

            return model1_accuracy, model2_accuracy, ens_model_avg

        print(f"{dt()} :: Train Ensemble Result")
        predict(model1, model2, tr_data, cat_tr)
        print(f"{dt()} :: Test Ensemble Result")
        ens_model1, ens_model2, ens_avg = predict(model1, model2, te_data,
                                                  cat_te)

        tracking1 = [
            dt(), param.method, 'model1_cnn', param.nb_combine, repeat,
            model1_score[0], model1_score[1]
        ]
        tracking2 = [
            dt(), param.method, 'model2_lstm', param.nb_combine, repeat,
            model2_score[0], model2_score[1]
        ]
        tracking3 = [
            dt(), param.method, 'avg_ensemble', param.nb_combine, repeat,
            f"m1{ens_model1}_m2{ens_model2}", ens_avg[0]
        ]

        ds.stock_result(tracking1)
        ds.stock_result(tracking2)
        ds.stock_result(tracking3)

        ds.save_result_obo(param, tracking3)

        # Drop large references before the next repeat and reset the TF
        # graph. (BUG FIX: `te_date` was a typo leaking te_data.)
        tr_data = None
        te_data = None
        tf.keras.backend.clear_session()
# Example no. 3
def deep_learning_experiment_configuration(param, train, test, label_info):
    """Run the standard Keras ``fit``/``evaluate`` experiment per repeat.

    For each repeat, builds a multi-modal model via
    ``model_compactor.model_setting``, trains it with ``model.fit`` (with a
    TensorBoard callback), evaluates on the test split, and stores the
    result through ``ds``.

    Args:
        param: experiment configuration (method, model_name, datatype,
            nb_modal, epochs, batch_size, nb_combine, collect, ...).
        train, test: per-repeat dicts holding "data_0".."data_{nb_modal-1}"
            and "tag".
        label_info: ``[nb_class, nb_people]``.

    Raises:
        ValueError: if ``param.method`` matches no known selection mode.
    """
    nb_class = label_info[0]
    nb_people = label_info[1]

    if param.method == method_select['people']:
        nb_repeat = nb_people
    elif param.method in method_select['repeat']:
        nb_repeat = 20
    elif param.method in method_select["CrossValidation"]:
        nb_repeat = param.collect["CrossValidation"] * 5
    else:
        # BUG FIX: nb_repeat was left unbound for unknown methods,
        # raising NameError at the loop below; fail loudly instead.
        raise ValueError(f"unknown experiment method: {param.method}")

    for repeat in range(nb_repeat):

        print(f"{dt()} :: {repeat+1}/{nb_repeat} experiment progress")

        tartr = train[repeat]
        tarte = test[repeat]

        tr_data = list()
        te_data = list()
        for idx in range(param.nb_modal):
            tr_data.append(tartr[f"data_{idx}"])
            te_data.append(tarte[f"data_{idx}"])

        if param.datatype == "type":
            # "type" tags are 1-based; shift to 0-based class indices.
            tr_label = tartr["tag"] - 1
            te_label = tarte["tag"] - 1
        elif param.datatype == "disease":
            tr_label = tartr["tag"]
            te_label = tarte["tag"]
        nb_class = label_info[0]

        cat_tr = preprocessing.to_categorical(tr_label, nb_class)
        cat_te = preprocessing.to_categorical(te_label, nb_class)

        model = model_compactor.model_setting(param, train[repeat],
                                              test[repeat],
                                              [nb_class, nb_people])
        print(f"{dt()} :: MODEL={param.model_name}, METHOD={param.method}")

        log_dir = f"../Log/{param.model_name}_{param.method}"

        tb_hist = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                 histogram_freq=0,
                                                 write_graph=True,
                                                 write_images=True)

        model.summary()
        model_result = model.fit(x=tr_data,
                                 y=cat_tr,
                                 epochs=param.epochs,
                                 batch_size=param.batch_size,
                                 validation_data=(te_data, cat_te),
                                 verbose=2,
                                 callbacks=[tb_hist])

        model_score = model.evaluate(x=te_data, y=cat_te, verbose=0)

        print(f"{dt()} :: Test Loss :{model_score[0]}")
        print(f"{dt()} :: Test Accuracy :{model_score[1]}")

        if repeat == 0:
            # First repeat carries the full experiment header row.
            tracking = [
                dt(), param.method, param.model_name, param.nb_combine, repeat,
                model_score[0], model_score[1]
            ]
            ds.stock_result(tracking)
        else:
            tracking = [dt(), repeat, model_score[0], model_score[1]]
            ds.stock_result(tracking)

        ds.save_result_obo(param, tracking)

        # Drop large references before the next repeat and reset the TF
        # graph. (BUG FIX: `te_date` was a typo leaking te_data.)
        model_result = None
        model_score = None
        tracking = None
        tr_data = None
        te_data = None
        tf.keras.backend.clear_session()