Example #1
# Standard/third-party imports needed by this snippet; Trainer, check_data_path,
# log and the Sort-of-CLEVR `dataset` module are assumed to come from the
# surrounding repository.
import argparse
import os

import torch


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--model',
                        type=str,
                        default='rn',
                        choices=['baseline', 'rn'])
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset_path',
                        type=str,
                        default='Sort-of-CLEVR_default')
    parser.add_argument('--learning_rate', type=float, default=5.0e-4)
    parser.add_argument('--lr-weight-decay',
                        action='store_true',
                        default=False)
    parser.add_argument('--epochs', type=int, default=100)
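    # Note: with action='store_false', CUDA is enabled by default and passing
    # --cuda actually turns it off.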
    parser.add_argument('--cuda', action='store_false', default=True)
    config = parser.parse_args()

    path = os.path.join('./DataGenerator/datasets', config.dataset_path)

    if check_data_path(path):
        import dataset
    else:
        raise ValueError(path)

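    # Attach dataset metadata from get_data_info()/get_conv_info() to the
    # config so it travels with it into the Trainer.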
    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()

    dataset_train, dataset_valid, dataset_test = dataset.create_default_splits(
        path)

    trainer = Trainer(config)

    log.warning("dataset: %s, learning_rate: %f" %
                (config.dataset_path, config.learning_rate))
    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=4)
    valid_loader = torch.utils.data.DataLoader(dataset_valid,
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=4)
    test_loader = torch.utils.data.DataLoader(dataset_test,
                                              batch_size=config.batch_size,
                                              shuffle=False,
                                              num_workers=4)
    trainer.train(train_loader, valid_loader)
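    # Reload the final checkpoint and evaluate it on the held-out test split.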
    trainer = Trainer.load_model('model/checkpoint_final.pth')
    test_accuracy = trainer.test(test_loader)
    print("Test Accuracy: %.3f %%" % (test_accuracy * 100, ))
Example #2
def train_nntrainer():
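    # The original script is assumed to import tensorflow/Keras, numpy and os,
    # and to define batch_size, num_epoch, datagen and History_LAW at module
    # level; they are not shown in this excerpt.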
    train_data_size, val_data_size, label_size, feature_size = dataset.get_data_info()
    InVec, InLabel, ValVec, ValLabel = dataset.load_data()

    print('reading is done')
    # These TF1 placeholders are defined but never used by the Keras pipeline below.
    inputs = tf.placeholder(tf.float32, [None, feature_size], name="input_X")
    labels = tf.placeholder(tf.float32, [None, label_size], name="label")

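    # LeNet-style CNN (conv -> avg-pool -> conv -> avg-pool -> dense); every
    # layer starts from zero weights, presumably to match the nntrainer reference.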
    model = models.Sequential()
    model.add(Conv2D(6, (5, 5), padding='valid', activation='sigmoid',
                     input_shape=(28, 28, 1),
                     kernel_initializer=initializers.Zeros(),
                     bias_initializer=initializers.Zeros()))
    model.add(AveragePooling2D(pool_size=(2, 2)))
    model.add(Conv2D(12, (5, 5), padding='valid', activation='sigmoid',
                     kernel_initializer=initializers.Zeros(),
                     bias_initializer=initializers.Zeros()))
    model.add(AveragePooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(layers.Dense(10, activation='softmax',
                           kernel_initializer=initializers.Zeros(),
                           bias_initializer=initializers.Zeros()))

    model.compile(optimizer=optimizers.Adam(lr=1e-4),
                  loss=tf.keras.losses.CategoricalCrossentropy(),
                  metrics=['accuracy'])

    model.summary()

    model_Hist = History_LAW()

    history = model.fit(datagen(InVec, InLabel, batch_size),
                        epochs=num_epoch,
                        steps_per_epoch=len(InVec) // batch_size,
                        validation_data=datagen(ValVec, ValLabel, batch_size),
                        validation_steps=len(ValVec) // batch_size,
                        callbacks=[model_Hist])

    if not os.path.exists('./nntrainer_tfmodel'):
        os.mkdir('./nntrainer_tfmodel')
    model.save('./nntrainer_tfmodel/nntrainer_keras.h5')

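    # Reshape the flat validation vectors to NCHW, then transpose to NHWC for
    # Keras evaluation.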
    ValVec = np.reshape(ValVec, (label_size * val_data_size, 1, 28, 28))
    ValVec = np.transpose(ValVec, [0, 2, 3, 1])
    score = model.evaluate(ValVec, ValLabel, verbose=1)
    print("%s: %.5f%%" % (model.metrics_names[1], score[1]*100))
Example #3
def train_nntrainer(target):
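    # USE_FIT, batch_size, num_epoch, datagen, save and create_model are
    # assumed to be defined elsewhere in the original script.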
    train_data_size, val_data_size, label_size, feature_size = dataset.get_data_info(
        target)
    InVec, InLabel, ValVec, ValLabel = dataset.load_data(target)

    model = create_model()
    model.summary()

    if not USE_FIT:
        ## Method 1 : using tensorflow session (training and evaluating manually)
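        # NOTE: this branch relies on TF1-style graph execution; under TF2,
        # tf.placeholder and Session require the compat.v1 APIs.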
        inputs = tf.placeholder(tf.float32, [None, 32, 32, 3], name="input_X")
        labels = tf.placeholder(tf.float32, [None, 100], name="label")
        sess = tf.compat.v1.Session()

        tf_logit = model(inputs, training=True)
        tf_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                       logits=tf_logit))
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            initial_learning_rate=1e-2, decay_steps=10000, decay_rate=0.96)
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule,
                                             epsilon=1.0e-7,
                                             beta_1=0.9,
                                             beta_2=0.999)

        trainable_variables = tf.compat.v1.trainable_variables()
        tf_grad = optimizer.get_gradients(tf_loss, params=trainable_variables)
        train_op = optimizer.apply_gradients(zip(tf_grad, trainable_variables))
        var_to_run = [
            train_op, tf_loss,
            tf.reduce_sum(
                tf.cast(
                    tf.equal(tf.math.argmax(tf.nn.softmax(tf_logit), axis=1),
                             tf.math.argmax(labels, axis=1)), tf.float32)) /
            batch_size
        ]

        tf_logit_eval = model(inputs, training=False)
        tf_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                       logits=tf_logit_eval))
        tf_logit_eval = tf.nn.softmax(tf_logit_eval)
        tf_accuracy = tf.reduce_sum(
            tf.cast(
                tf.equal(tf.math.argmax(tf_logit_eval, axis=1),
                         tf.math.argmax(labels, axis=1)),
                tf.float32)) / batch_size
        infer_to_run = [tf_accuracy, tf_loss]

        sess.run(tf.compat.v1.global_variables_initializer())

        # Dump the freshly initialised weights to model.bin. Conv kernels are
        # transposed from Keras (H, W, in, out) layout to (out, in, H, W);
        # conv biases are written as-is, and the remaining BatchNorm/Dense
        # parameters are written transposed, in get_weights() order.
        weights = model.get_weights()

        # 13 Conv2D layers: kernel at even indices 0..24, bias right after it.
        for idx in range(0, 26, 2):
            save("model.bin", np.transpose(weights[idx], [3, 2, 0, 1]))
            save("model.bin", weights[idx + 1])

        # BatchNorm and Dense parameters occupy indices 26..43.
        for idx in range(26, 44):
            save("model.bin", np.transpose(weights[idx]))

        for i in range(0, num_epoch):
            count = 0
            accuracy = 0
            loss = 0
            for x, y in datagen(InVec, InLabel, batch_size):
                feed_dict = {inputs: x, labels: y}
                tf_out = sess.run(var_to_run, feed_dict=feed_dict)
                loss += tf_out[1]
                accuracy += tf_out[2]
                count = count + 1
                if count == len(InVec) // batch_size:
                    break

            training_accuracy = (accuracy / count) * 100.0
            training_loss = loss / count

            count = 0
            accuracy = 0
            loss = 0
            for x, y in datagen(ValVec, ValLabel, batch_size):
                feed_dict = {inputs: x, labels: y}
                infer_out = sess.run(infer_to_run, feed_dict=feed_dict)
                accuracy += infer_out[0]
                loss += infer_out[1]
                count = count + 1
                if count == len(ValVec) // batch_size:
                    break

            accuracy = (accuracy / count) * 100.0
            loss = loss / count
            print(
                '#{}/{} - Training Loss: {:10.6f} - Training Accuracy: {:10.6f} >> [ Accuracy: {:10.6f}% - Validation Loss : {:10.6f} ]'
                .format(i + 1, num_epoch, training_loss, training_accuracy,
                        accuracy, loss))
    else:
        ## Method 2 : using keras fit (training and evaluating with model.fit)
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            initial_learning_rate=1e-2, decay_steps=10000, decay_rate=0.96)
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule,
                                             epsilon=1.0e-7,
                                             beta_1=0.9,
                                             beta_2=0.999)

        model.compile(
            optimizer=optimizer,
            loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])
        model.fit(datagen(InVec, InLabel, batch_size),
                  epochs=num_epoch,
                  steps_per_epoch=len(InVec) // batch_size,
                  validation_data=datagen(ValVec, ValLabel, batch_size),
                  validation_steps=len(ValVec) // batch_size,
                  shuffle=False)
Example #4
def train_nntrainer(target):
    train_data_size, val_data_size, label_size, feature_size = dataset.get_data_info(
        target)
    InVec, InLabel, ValVec, ValLabel = dataset.load_data(target)

    model = create_model()
    model.summary()

    if not USE_FIT:
        ## Method 1 : using tensorflow session (training and evaluating manually)
        inputs = tf.placeholder(tf.float32, [None, 28, 28, 1], name="input_X")
        labels = tf.placeholder(tf.float32, [None, 10], name="label")
        sess = tf.compat.v1.Session()

        tf_logit = model(inputs, training=True)
        tf_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                       logits=tf_logit))
        optimizer = tf.keras.optimizers.Adam(learning_rate=1.0e-4,
                                             epsilon=1.0e-7,
                                             beta_1=0.9,
                                             beta_2=0.999)

        trainable_variables = tf.compat.v1.trainable_variables()
        tf_grad = optimizer.get_gradients(tf_loss, params=trainable_variables)
        train_op = optimizer.apply_gradients(zip(tf_grad, trainable_variables))

        var_to_run = [train_op, tf_loss]
        infer_to_run = [
            tf.reduce_sum(
                tf.cast(
                    tf.equal(tf.math.argmax(tf.nn.softmax(tf_logit), axis=1),
                             tf.math.argmax(labels, axis=1)), tf.float32)) /
            batch_size, tf_loss
        ]

        sess.run(tf.compat.v1.global_variables_initializer())

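        # Dump the initial weights: conv kernels are transposed from Keras
        # (H, W, in, out) layout to (out, in, H, W); biases are written as-is.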
        conv2_0 = np.transpose(model.get_weights()[0], [3, 2, 0, 1])
        conv2_1 = np.transpose(model.get_weights()[2], [3, 2, 0, 1])
        save("model.bin", conv2_0)
        save("model.bin", model.get_weights()[1])
        save("model.bin", conv2_1)
        save("model.bin", model.get_weights()[3])
        save("model.bin", model.get_weights()[4])
        save("model.bin", model.get_weights()[5])

        for i in range(0, num_epoch):
            count = 0
            loss = 0
            for x, y in datagen(InVec, InLabel, batch_size):
                feed_dict = {inputs: x, labels: y}
                tf_out = sess.run(var_to_run, feed_dict=feed_dict)
                loss += tf_out[1]
                count = count + 1

                if count == len(InVec) // batch_size:
                    break

            training_loss = loss / count

            count = 0
            accuracy = 0
            loss = 0
            for x, y in datagen(ValVec, ValLabel, batch_size):
                feed_dict = {inputs: x, labels: y}
                infer_out = sess.run(infer_to_run, feed_dict=feed_dict)
                accuracy += infer_out[0]
                loss += infer_out[1]
                count = count + 1
                if count == len(ValVec) // batch_size:
                    break
            accuracy = (accuracy / count) * 100.0
            loss = loss / count

            print(
                '#{}/{} - Training Loss: {:10.6f} >> [ Accuracy: {:10.6f}% - Validation Loss : {:10.6f} ]'
                .format(i + 1, num_epoch, training_loss, accuracy, loss))
    else:
        ## Method 2 : using keras fit (training and evaluating with model.fit)
        optimizer = optimizers.Adam(learning_rate=1.0e-4,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    epsilon=1.0e-7)
        model.compile(
            optimizer=optimizer,
            loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])
        model.fit(datagen(InVec, InLabel, batch_size),
                  epochs=num_epoch,
                  steps_per_epoch=len(InVec) // batch_size,
                  validation_data=datagen(ValVec, ValLabel, batch_size),
                  validation_steps=len(ValVec) // batch_size,
                  shuffle=False)
Example #5
import argparse
import dataset, word2vec as w2v
import config
import model

if __name__ == '__main__':
    # parser = argparse.ArgumentParser()
    # parser.add_argument(
    #     '--w2v-train',
    #     choices=[True, False],
    #     type=bool,
    #     default=False,
    #     help='whether train w2v features or use saved model')
    #
    # args = parser.parse_args()

    config.setup()

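    # get_data_info() reports corpus statistics (label mapping, number of
    # labels, max sentence length) used to build the model below.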
    data_size, label_to_index, num_labels, max_sentence_length = (
        dataset.get_data_info())

    w2v_model = w2v.load_or_create_w2v_model()
    lexicon, word_to_index, index_to_word = dataset.create_lexicon_from_w2v(
        w2v_model)

    model.build_and_train_model(w2v_model, word_to_index, label_to_index,
                                max_sentence_length, num_labels + 1)