Example No. 1
import os

import numpy as np
import tensorflow as tf
from sklearn.cluster import KMeans

# `utils` and `RNN_clustering_model` are project-local modules.

def run_model(filename, config):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True

    testing_data, testing_label = utils.load_data(filename)
    testing_label, num_classes = utils.transfer_labels(testing_label)

    config.class_num = num_classes
    config.embedding_size = 1
    config.batch_size = testing_data.shape[0]
    config.num_steps = testing_data.shape[1]

    test_noise_data = np.zeros(shape=testing_data.shape)
    
    with tf.Session(config=gpu_config) as sess:
        model = RNN_clustering_model(config=config)
        input_tensors, loss_tensors, hidden_abstract, F_update, output_tensor = model.build_model()

        sess.run(tf.global_variables_initializer())
        
        # model_path = './Model/model.ckpt'
        model_path = './model_test/beetlefly'
        saver = tf.train.Saver()
        saver.restore(sess, model_path)
     
        test_total_abstract = sess.run(hidden_abstract,
                feed_dict={input_tensors['inputs']: testing_data, 
                input_tensors['noise']: test_noise_data
                })

    test_hidden_val = np.array(test_total_abstract).reshape(-1, np.sum(config.hidden_size) * 2)
    km = KMeans(n_clusters=num_classes)
    km_idx = km.fit_predict(test_hidden_val)
    ri, nmi, acc = utils.evaluation(prediction=km_idx, label=testing_label)
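`utils.evaluation` is not shown in this snippet. Below is a minimal sketch of the clustering metrics it presumably returns (Rand index, normalized mutual information, and best-match accuracy via Hungarian assignment), assuming scikit-learn and SciPy are available; this is an illustration, not the project's actual implementation:

import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import rand_score, normalized_mutual_info_score

def evaluation(prediction, label):
    # Rand index and NMI come straight from scikit-learn.
    ri = rand_score(label, prediction)
    nmi = normalized_mutual_info_score(label, prediction)
    # Clustering accuracy: match cluster ids to true labels with the
    # Hungarian algorithm, then count correctly assigned samples.
    n = max(prediction.max(), label.max()) + 1
    cost = np.zeros((n, n), dtype=np.int64)
    for p, t in zip(prediction, label):
        cost[p, t] += 1
    row, col = linear_sum_assignment(-cost)
    acc = cost[row, col].sum() / prediction.size
    return ri, nmi, acc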
Example No. 2
import os

import numpy as np
import tensorflow as tf

# `utils` and `RNN_clustering_model` are project-local modules.

def run_model(train_data_filename, config):
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True

    train_data, train_label = utils.load_data(train_data_filename)
    
    config.batch_size = train_data.shape[0]
    # config.batch_size = 10
    config.num_steps = train_data.shape[1]
    config.embedding_size = 1

    train_label, num_classes = utils.transfer_labels(train_label)
    config.class_num = num_classes

    print('Label:', np.unique(train_label))


    with tf.Session(config=gpu_config) as sess:
        model = RNN_clustering_model(config=config)
        input_tensors, loss_tensors, real_hidden_abstract, F_update, output_tensor = model.build_model()
        train_op = tf.train.AdamOptimizer(config.learning_rate).minimize(loss_tensors['loss'])

        sess.run(tf.global_variables_initializer())

        Epoch = 30
    
        for i in range(Epoch):
            # shuffle data and label
            indices = np.random.permutation(train_data.shape[0])
            shuffle_data = train_data[indices]
            shuffle_label = train_label[indices]
    
            row = train_data.shape[0]
            batch_len = int(row / config.batch_size)
            left_row = row - batch_len * config.batch_size

            if left_row != 0:
                need_more = config.batch_size - left_row
                rand_idx = np.random.choice(np.arange(batch_len * config.batch_size), size=need_more)
                shuffle_data = np.concatenate((shuffle_data, shuffle_data[rand_idx]), axis=0)
                shuffle_label = np.concatenate((shuffle_label, shuffle_label[rand_idx]), axis=0)
            assert shuffle_data.shape[0] % config.batch_size == 0

            noise_data = np.random.normal(loc=0, scale=0.1, size=[shuffle_data.shape[0] * 2, shuffle_data.shape[1]])
            total_abstract = []
            print('----------Epoch %d----------' % i)
            k = 0

            for input, _ in utils.next_batch(config.batch_size, shuffle_data):
                noise = noise_data[k * config.batch_size * 2: (k + 1) * config.batch_size * 2, :]
                fake_input, train_real_fake_labels = utils.get_fake_sample(input)
                loss_val, abstract, _ = sess.run(
                    [loss_tensors['loss'], real_hidden_abstract, train_op],
                    feed_dict={input_tensors['inputs']: np.concatenate((input, fake_input), axis=0),
                               input_tensors['noise']: noise,
                               input_tensors['real_fake_label']: train_real_fake_labels})
                print(loss_val)
                total_abstract.append(abstract)
                k += 1

            # every 10 epochs, refresh F with the top-k right singular vectors
            # of the hidden representations (see the note after this example)
            if i % 10 == 0 and i != 0:
                part_hidden_val = np.array(abstract).reshape(-1, np.sum(config.hidden_size) * 2)
                W = part_hidden_val.T
                U, sigma, VT = np.linalg.svd(W)
                sorted_indices = np.argsort(sigma)
                topk_evecs = VT[sorted_indices[:-num_classes - 1:-1], :]
                F_new = topk_evecs.T
                sess.run(F_update, feed_dict={input_tensors['F_new_value']: F_new})

        saver = tf.train.Saver()
        saver.save(sess, "model_test/beetlefly")
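A note on the F update above: with the hidden representations stacked as the columns of W, the top-num_classes right singular vectors of W maximize tr(F^T W^T W F) subject to F^T F = I, which is the spectral relaxation of the k-means objective. Since np.linalg.svd returns singular values in descending order, the argsort-based slicing is equivalent to simply taking VT[:num_classes]. A minimal NumPy check of that property (shapes are illustrative):

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(64, 40))            # hidden_dim x num_samples
k = 3                                    # num_classes
U, sigma, VT = np.linalg.svd(W)
F = VT[:k].T                             # num_samples x k, orthonormal columns
assert np.allclose(F.T @ F, np.eye(k))
# The achieved objective equals the sum of the k largest squared singular values.
assert np.isclose(np.trace(F.T @ W.T @ W @ F), (sigma[:k] ** 2).sum())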
Example No. 3
"""
a p file is a list: [left_hand_skeleton, right_hand_skeleton, left_leg_skeleton, right_leg_skeleton, central_trunk_skeleton, labels]
the shape of the first five ones: (num_samples, time_length, num_joints)
the shape of the last one: (num_samples,)
"""
filepath_train = './dataset/MSRAction3D_real_world_P4_Split_AS3_train.p'
filepath_test = './dataset/MSRAction3D_real_world_P4_Split_AS3_test.p'
data_train = cp.load(open(filepath_train, 'rb'))
skeletons_train = data_train[0:5]
labels_train = data_train[5]
data_test = cp.load(open(filepath_test, 'rb'))
skeletons_test = data_test[0:5]
labels_test = data_test[5]

print('Transferring labels...')
labels_train, labels_test, num_classes = utils.transfer_labels(labels_train, labels_test)

"""
set parameters of reservoirs, create five reservoirs and get echo states of five skeleton parts
"""
num_samples_train = labels_train.shape[0]
num_samples_test = labels_test.shape[0]

_, time_length, n_in = skeletons_train[0].shape
n_res = n_in * 3
IS = 0.1
SR = 0.9
sparsity = 0.3
leakyrate = 1.0

# create five different reservoirs, one per skeleton part
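The reservoir class itself (used as `reservoir.reservoir_layer(n_in, n_res, IS, SR, sparsity, leakyrate)` in Example No. 5) is not included in these snippets. Below is a minimal leaky-integrator echo state network sketch consistent with the parameters above (input scaling IS, spectral radius SR, sparsity, leaky rate); the interpretation of sparsity as connection density and all internals are assumptions, not the project's actual implementation:

import numpy as np

class reservoir_layer:
    """Sketch of a leaky-integrator ESN; not the project's actual class."""
    def __init__(self, n_in, n_res, IS, SR, sparsity, leakyrate, seed=0):
        rng = np.random.default_rng(seed)
        self.n_res, self.leakyrate = n_res, leakyrate
        # Input weights scaled by IS; recurrent weights sparsified to the
        # given density, then rescaled to spectral radius SR.
        self.W_in = IS * rng.uniform(-1, 1, (n_res, n_in))
        W = rng.uniform(-1, 1, (n_res, n_res))
        W[rng.random((n_res, n_res)) > sparsity] = 0.0
        self.W = SR * W / np.max(np.abs(np.linalg.eigvals(W)))

    def get_echo_states(self, X):
        # X: (num_samples, time_length, n_in) -> (num_samples, time_length, n_res)
        num_samples, time_length, _ = X.shape
        states = np.zeros((num_samples, time_length, self.n_res), np.float32)
        x = np.zeros((num_samples, self.n_res))
        for t in range(time_length):
            pre = X[:, t, :] @ self.W_in.T + x @ self.W.T
            x = (1 - self.leakyrate) * x + self.leakyrate * np.tanh(pre)
            states[:, t, :] = x
        return states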
Example No. 4
import os

import numpy as np
import tensorflow as tf

# `utils`, `Generator`, and `Discriminator` are project-local modules.

def main(config):
    os.environ['CUDA_VISIBLE_DEVICES'] = config.GPU
    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True

    print('Loading and transforming data--------------------')
    print(config.train_data_filename)
    train_data, train_label = utils.load_data(config.train_data_filename)

    # For univariate input: one value per time step.
    config.num_steps = train_data.shape[1]
    config.input_dimension_size = 1

    train_label, num_classes = utils.transfer_labels(train_label)
    config.class_num = num_classes

    print('Train Label:', np.unique(train_label))
    print('Train data completed-------------')

    test_data, test_labels = utils.load_data(config.test_data_filename)

    test_label, test_classes = utils.transfer_labels(test_labels)
    print('Test data completed-------------')

    with tf.Session(config=gpu_config) as sess:

        G = Generator(config=config)
        (input_tensors, loss_tensors, accuracy, prediction, M, Label_predict,
         prediction_target, Last_hidden_output) = G.build_model()

        real_pre = prediction * (1 - M) + prediction_target * M
        real_pre = tf.reshape(real_pre, [
            config.batch_size,
            (config.num_steps - 1) * config.input_dimension_size
        ])
        D = Discriminator(config)
        predict_M = D(real_pre)

        predict_M = tf.reshape(
            predict_M,
            [-1, (config.num_steps - 1) * config.input_dimension_size])

        D_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=predict_M,
                                                    labels=M))
        G_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=predict_M,
                                                    labels=1 - M) * (1 - M))

        total_G_loss = loss_tensors['loss'] + config.lamda_D * G_loss

        #D_solver
        D_solver = tf.train.AdamOptimizer(config.learning_rate).minimize(
            D_loss, var_list=D.vars)

        #G_solver
        G_solver = tf.train.AdamOptimizer(config.learning_rate).minimize(
            total_G_loss, var_list=G.vars)

        #global_variables_initializer
        sess.run(tf.global_variables_initializer())

        #------------------------------------------------train---------------------------------------------------
        Epoch = config.epoch

        for i in range(Epoch):
            total_loss = []
            total_batch_d_loss = []
            total_batch_g_loss = []
            total_train_accuracy = []

            print('----------Epoch %d----------' % i)
            '''train'''
            for input, prediction_target, mask, label_target, _, batch_need_label in utils.next_batch(
                    config.batch_size,
                    train_data,
                    train_label,
                    True,
                    config.input_dimension_size,
                    config.num_steps,
                    Trainable=True):
                for _ in range(config.D_epoch):
                    _, batch_d_loss, p_M, real_M = sess.run(
                        [D_solver, D_loss, predict_M, M],
                        feed_dict={
                            input_tensors['input']: input,
                            input_tensors['prediction_target']:
                            prediction_target,
                            input_tensors['mask']: mask,
                            input_tensors['label_target']: label_target,
                            input_tensors['lstm_keep_prob']: 1.0,
                            input_tensors['classfication_keep_prob']: 1.0
                        })
                total_batch_d_loss.append(batch_d_loss)
                for _ in range(config.G_epoch):
                    batch_loss, batch_g_loss, batch_accuracy, _, batch_train_Pre, batch_train_hidden = sess.run(
                        [
                            loss_tensors['loss'], G_loss, accuracy, G_solver,
                            prediction, Last_hidden_output
                        ],
                        feed_dict={
                            input_tensors['input']: input,
                            input_tensors['prediction_target']:
                            prediction_target,
                            input_tensors['mask']: mask,
                            input_tensors['label_target']: label_target,
                            input_tensors['lstm_keep_prob']: 1.0,
                            input_tensors['classfication_keep_prob']: 1.0
                        })
                total_loss.append(batch_loss)
                total_batch_g_loss.append(batch_g_loss)
                total_train_accuracy.append(batch_accuracy)

            print("Loss:", np.mean(total_loss), "Train acc:",
                  np.mean(np.array(total_train_accuracy).reshape(-1)))
        '''test'''
        total_test_accuracy = []
        total_sample_num = 0
        total_Pre = []
        for input, prediction_target, mask, label_target, batch_size, batch_need_label in utils.next_batch(
                config.batch_size,
                test_data,
                test_label,
                True,
                config.input_dimension_size,
                config.num_steps,
                Trainable=False):
            total_sample_num += batch_size
            batch_accuracy, batch_Pre, batch_Label_predict, batch_test_hidden = sess.run(
                [accuracy, prediction, Label_predict, Last_hidden_output],
                feed_dict={
                    input_tensors['input']: input,
                    input_tensors['prediction_target']: prediction_target,
                    input_tensors['mask']: mask,
                    input_tensors['label_target']: label_target,
                    input_tensors['lstm_keep_prob']: 1.0,
                    input_tensors['classfication_keep_prob']: 1.0
                })
            total_test_accuracy.append(batch_accuracy)
            total_Pre.append(batch_Pre)

        assert total_sample_num == test_data.shape[0]
        Test_acc = np.mean(
            np.array(total_test_accuracy).reshape(-1)[:total_sample_num])

        print('Test acc:', Test_acc)
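The masking arithmetic above is the heart of this imputation GAN: with M = 1 at observed positions, real_pre keeps the ground truth wherever it exists and falls back to the generator's predictions at missing positions; D then tries to recover M from the mix, while the (1 - M) factor in G_loss penalizes the generator only where it actually imputed. A tiny NumPy illustration of the mixing step (values are made up):

import numpy as np

M = np.array([[1., 0., 1., 0.]])                  # 1 = observed, 0 = missing
prediction_target = np.array([[5., 0., 7., 0.]])  # ground truth (zero-filled where missing)
prediction = np.array([[4.9, 6.1, 7.2, 8.3]])     # generator output for every step
real_pre = prediction * (1 - M) + prediction_target * M
print(real_pre)  # [[5.  6.1 7.  8.3]] -- truth where observed, imputations elsewhere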
Example No. 5
import pickle

import numpy as np
from keras.models import Model, load_model
from keras.layers import Input, Dense, Conv2D, GlobalMaxPooling2D, concatenate
from keras.callbacks import TensorBoard, ModelCheckpoint

# `utils`, `reservoir`, `get_data`, and `get_time` are project-local;
# `skeleton_parts` is assumed to be a module-level constant (the five body parts).

def run_MSR_MSMC(args):
    # load data
    skeletons_train, labels_train = get_data(args["input_train_file"])
    skeletons_test, labels_test = get_data(args["input_test_file"])

    # one-hot encode the labels
    labels_train, labels_test, num_classes = utils.transfer_labels(
        labels_train, labels_test)
    """
    set parameters of reservoirs, create five reservoirs and get echo states of five skeleton parts
    """
    num_samples_train = labels_train.shape[0]
    num_samples_test = labels_test.shape[0]

    _, time_length, n_in = skeletons_train[0].shape

    if args["use_ESN"]:
        n_res = n_in * args["expansion_factor"]

        # GET RESERVOIRS ####
        reservoirs = []
        if args["train"]:
            if args["common_reservoir_for_limbs"]:
                # create three different reservoirs
                reservoirs = [
                    reservoir.reservoir_layer(n_in, n_res, args["IS"],
                                              args["SR"], args["sparsity"],
                                              args["leakyrate"])
                    for i in range(3)
                ]
                # left_hand, right_hand, left_leg, right_leg, trunk
            else:
                # create five different reservoirs, one for a skeleton part - 5 parts
                reservoirs = [
                    reservoir.reservoir_layer(n_in, n_res, args["IS"],
                                              args["SR"], args["sparsity"],
                                              args["leakyrate"])
                    for i in range(skeleton_parts)
                ]
            with open(args["reservoir_file"], 'wb') as f:
                pickle.dump(reservoirs, f)
        else:
            # for test time we will use the reservoir from training
            with open(args["reservoir_file"], 'rb') as f:
                reservoirs = pickle.load(f)
                n_res = reservoirs[0].n_res

        reservoir_nums = ([0, 0, 1, 1, 2] if len(reservoirs) != skeleton_parts
                          else list(range(skeleton_parts)))

        # GET ECHO STATES for the skeletons ####
        echo_states_train = [
            np.empty((num_samples_train, 1, time_length, n_res), np.float32)
            for i in range(skeleton_parts)
        ]
        echo_states_test = [
            np.empty((num_samples_test, 1, time_length, n_res), np.float32)
            for i in range(skeleton_parts)
        ]
        for i, reservoir_num in enumerate(reservoir_nums):
            echo_states_train[i][:, 0, :, :] = reservoirs[
                reservoir_num].get_echo_states(skeletons_train[i])
            echo_states_test[i][:, 0, :, :] = reservoirs[
                reservoir_num].get_echo_states(skeletons_test[i])
        echo_states_train = [
            np.concatenate(echo_states_train[0:2], axis=1),
            np.concatenate(echo_states_train[2:4], axis=1),
            echo_states_train[4]
        ]
        echo_states_test = [
            np.concatenate(echo_states_test[0:2], axis=1),
            np.concatenate(echo_states_test[2:4], axis=1), echo_states_test[4]
        ]

        input_train, input_test = echo_states_train, echo_states_test

    else:
        # if we don't want to use reservoirs, reshape the skeletons and feed them to a conv network
        n_res = n_in
        skeletons_train_ = [np.expand_dims(x, 1) for x in skeletons_train]
        skeletons_test_ = [np.expand_dims(x, 1) for x in skeletons_test]

        skeletons_train_ = [
            np.concatenate(skeletons_train_[0:2], axis=1),
            np.concatenate(skeletons_train_[2:4], axis=1), skeletons_train_[4]
        ]
        skeletons_test_ = [
            np.concatenate(skeletons_test_[0:2], axis=1),
            np.concatenate(skeletons_test_[2:4], axis=1), skeletons_test_[4]
        ]

        input_train, input_test = skeletons_train_, skeletons_test_

    # TRAIN MSMC MODEL ####
    if args["train"]:

        input_shapes = ((2, time_length, n_res), (2, time_length, n_res),
                        (1, time_length, n_res))
        inputs = []
        features = []

        # BUILD THE MSMC decoder model ####
        for i in range(3):
            input = Input(shape=input_shapes[i])
            inputs.append(input)

            pools = []
            for j in range(len(args["nb_row"])):
                conv = Conv2D(args["nb_filter"], (args["nb_row"][j], n_res),
                              kernel_initializer=args["kernel_initializer"],
                              activation=args["activation"],
                              padding=args["padding"],
                              strides=args["strides"],
                              data_format=args["data_format"])(input)
                pool = GlobalMaxPooling2D(
                    data_format=args["data_format"])(conv)
                pools.append(pool)

            features.append(concatenate(pools))
        """
        hands_features = features[0]
        legs_features = features[1]
        trunk_features = features[2]
        """
        body_features = Dense(args["nb_filter"] * len(args["nb_row"]),
                              kernel_initializer=args["kernel_initializer"],
                              activation=args["activation"])(
                                  concatenate(features))

        outputs = Dense(num_classes,
                        kernel_initializer=args["kernel_initializer"],
                        activation='softmax')(body_features)

        model = Model(inputs=inputs, outputs=outputs)

        model.compile(optimizer=args["optimizer"],
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        # CALLBACKS ####
        log_file = args["log_dir"] + "/{}_res{}_com{}_split{}".format(
            get_time(), args["use_ESN"], args["common_reservoir_for_limbs"],
            args["split_number"])
        tensorboard = TensorBoard(log_dir=log_file,
                                  histogram_freq=0,
                                  batch_size=args["batch_size"],
                                  write_graph=True,
                                  write_grads=False,
                                  write_images=True,
                                  embeddings_freq=0,
                                  embeddings_layer_names=None,
                                  embeddings_metadata=None)
        checkpoint = ModelCheckpoint(args["checkpoint_file"],
                                     monitor='val_acc',
                                     verbose=args["verbose"],
                                     save_best_only=True,
                                     mode='max')
        callbacks_list = [checkpoint, tensorboard]

        # FIT MODEL ####
        model.fit(input_train,
                  labels_train,
                  batch_size=args["batch_size"],
                  epochs=args["nb_epochs"],
                  verbose=args["verbose"],
                  validation_data=(input_test, labels_test),
                  callbacks=callbacks_list)

    # LOAD BEST MODEL ####
    try:
        model = load_model(args["checkpoint_file"])
    except OSError as err:
        print("OS error: {0}".format(err))
        return

    # EVALUATE MODEL ####
    print("==Evaluating==")
    scores = model.evaluate(input_test,
                            labels_test,
                            batch_size=args["batch_size"],
                            verbose=args["verbose"])
    print("{}: {} and loss is {}".format(model.metrics_names[1],
                                         scores[1] * 100, scores[0]))

    labels_test_pred = model.predict(input_test)

    print("parameters :::", model.count_params())
    # print("summary :::", model.summary())
    # print(confusion_matrix(labels_test, labels_test_pred))

    # LOGGING ####
    with open(args["results_file"], "a+") as f:
        print("Reservoir :: {} @ {}".format(args["use_ESN"], get_time()),
              file=f)
        print("Input file :: {}".format(args["input_train_file"]), file=f)
        if args["use_ESN"]:
            print("Common Limb Reservoir :: {}".format(
                args["common_reservoir_for_limbs"]),
                  file=f)
        print("nb_epoch : {}, optimizer : {}, n_res : {}".format(
            args["nb_epochs"], args["optimizer"], n_res),
              file=f)
        print("{}: {} and loss is {}".format(model.metrics_names[1],
                                             scores[1] * 100, scores[0]),
              file=f)
        print("parameters :::", model.count_params(), file=f)
        print("*" * 15, file=f)

    return model.metrics_names[1], scores[1] * 100
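All five examples rely on `utils.transfer_labels`, whose implementation is not shown anywhere above. Below is a plausible sketch of the two-argument variant used in Examples No. 3 and No. 5 (remap raw class ids onto 0..num_classes-1, then one-hot encode both splits); the single-argument variant in Examples No. 1, No. 2, and No. 4 apparently returns integer labels plus the class count instead:

import numpy as np

def transfer_labels(labels_train, labels_test):
    # Map arbitrary class ids onto 0..num_classes-1, shared across both splits.
    classes = np.unique(np.concatenate((labels_train, labels_test)))
    num_classes = classes.size
    index = {c: i for i, c in enumerate(classes)}

    def one_hot(labels):
        out = np.zeros((labels.size, num_classes), dtype=np.float32)
        out[np.arange(labels.size), [index[c] for c in labels]] = 1.0
        return out

    return one_hot(labels_train), one_hot(labels_test), num_classes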