def train(run, model_name, data_path, epochs, batch_size, mlflow_custom_log, log_as_onnx):
    x_train, y_train, x_test, y_test = utils.get_train_data(data_path)
    model = build_model()

    model.compile(
        optimizer="rmsprop",
        loss="categorical_crossentropy",
        metrics=["accuracy"])
    model.summary()
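    # LogMetricsCallback is assumed to be a user-defined Keras callback that logs per-epoch metrics to MLflow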
    model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1, callbacks=[LogMetricsCallback()])
    print("model.type:", type(model))

    test_loss, test_acc = model.evaluate(x_test, y_test)
    print("test_acc:", test_acc)
    print("test_loss:", test_loss)

    if mlflow_custom_log:
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)

        mlflow.log_metric("test_acc", test_acc)
        mlflow.log_metric("test_loss", test_loss)

        # Save as TensorFlow SavedModel flavor
        mlflow.keras.log_model(model, "keras-model-tf", save_format="tf")

        # Save as default H5 format
        mlflow.keras.log_model(model, "keras-model-h5")

        # Save as TensorFlow SavedModel format - non-flavor artifact
        path = "keras-model-tf-non-flavor"
        tf.keras.models.save_model(model, path, overwrite=True, include_optimizer=True)
        mlflow.log_artifact(path)

        # write model summary
        summary = []
        model.summary(print_fn=summary.append)
        summary = "\n".join(summary)
        with open("model_summary.txt", "w") as f:
            f.write(summary)
        mlflow.log_artifact("model_summary.txt")

    else:
        # utils.register_model(run, model_name)
        pass

    # write model as yaml file
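    # (Model.to_yaml() was removed in TF 2.6; on newer versions use model.to_json() instead)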
    with open("model.yaml", "w") as f:
        f.write(model.to_yaml())
    mlflow.log_artifact("model.yaml")

    # MLflow - log onnx model
    if log_as_onnx:
        import onnx_utils
        mname = f"{model_name}_onnx" if model_name else None
        onnx_utils.log_model(model, "onnx-model", mname)

    # predictions = model.predict_classes(x_test)
    predictions = np.argmax(model.predict(x_test), axis=-1)
    print("predictions:", predictions)
Example #2
def compress(saved_model_path,
             tflite_model_path,
             img_size,
             quantize=None,
             device=None):
    converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
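    # Note: float16 weight quantization (above) and the full-integer uint8 path
    # (below, when quantize is set) are normally mutually exclusive in TFLite;
    # this function as written mixes the two configurations.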

    if quantize:
        sample_dataset = DataGenerator(get_train_data(), 10, img_size).sample()
        sample_images = sample_dataset[0]

        def representative_dataset_gen():
            for index in range(sample_images.shape[0] - 1):
                yield [sample_images[index:index + 1]]

        converter.representative_dataset = tf.lite.RepresentativeDataset(
            representative_dataset_gen)
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8, tf.lite.OpsSet.SELECT_TF_OPS
        ]
        converter.inference_input_type = tf.uint8
        converter.inference_output_type = tf.uint8

    tflite_model = converter.convert()
    # write the converted model; write() returns the number of bytes written
    with open(tflite_model_path, "wb") as f:
        print(f.write(tflite_model))
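
A minimal invocation sketch for compress (an assumption, not from the original source; all paths are placeholders):

# Hypothetical call sites
compress("saved_model/", "model_fp16.tflite", img_size=128)                  # float16-only conversion
compress("saved_model/", "model_int8.tflite", img_size=128, quantize=True)  # full-integer quantization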
Example #3
def train(run, model_name, data_path, epochs, batch_size, mlflow_custom_log,
          log_as_onnx):
    x_train, y_train, x_test, y_test = utils.get_train_data(data_path)
    model = build_model()

    model.compile(optimizer="rmsprop",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()
    model.fit(x_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size,
              verbose=0)
    print("model.type:", type(model))

    test_loss, test_acc = model.evaluate(x_test, y_test)
    print("test_acc:", test_acc)
    print("test_loss:", test_loss)

    if mlflow_custom_log:
        mlflow.log_param("epochs", epochs)
        mlflow.log_param("batch_size", batch_size)

        mlflow.log_metric("test_acc", test_acc)
        mlflow.log_metric("test_loss", test_loss)

        # Save as TensorFlow SavedModel format (MLflow Keras default)
        mlflow.keras.log_model(model,
                               "keras-model",
                               registered_model_name=model_name)
        #mlflow.keras.log_model(model, "keras-model")

        # write model summary
        summary = []
        model.summary(print_fn=summary.append)
        summary = "\n".join(summary)
        with open("model_summary.txt", "w") as f:
            f.write(summary)
        mlflow.log_artifact("model_summary.txt")

    elif model_name:
        utils.register_model(run, model_name)

    # write model as yaml file
    with open("model.yaml", "w") as f:
        f.write(model.to_yaml())
    mlflow.log_artifact("model.yaml")

    # MLflow - log onnx model
    if log_as_onnx:
        import onnx_utils
        mname = f"{model_name}_onnx" if model_name else None
        onnx_utils.log_model(model, "onnx-model", mname)

    # predict_classes was removed in TF 2.6; take the argmax over predict instead
    predictions = np.argmax(model.predict(x_test), axis=-1)
    print("predictions:", predictions)
Example #4
def main():
    parser = argparse.ArgumentParser(description='Pretraining argument parser')
    parser = load_pretrain_args(parser)
    parser = load_test_args(parser)
    args = parser.parse_args()

    set_seeds(args.seed)

    train_data = get_train_data()
    valid_data = get_valid_data()
    test_data = get_test_data()

    nnet = create_nnet(train_data, args)
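    # create_nnet is assumed to return a DataParallel-wrapped network, hence nnet.module below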

    optimizer = Adam(nnet.parameters(), lr=args.lr)
    ce_loss = nn.CrossEntropyLoss()
    mse_loss = nn.MSELoss()

    action_space = ActionSpace()

    tb = SummaryWriter()

    best_score = 0

    for epoch in range(1, args.update_epochs + 1):
        print(f'Epoch {epoch}')

        for indice in random_batch(len(train_data), args.train_batch_size):
            batch = train_data[indice]
            input_batch = to_input_batch(batch, torch.device('cuda'))

            policies, values = nnet(input_batch)

            target_policies = get_target_policies(batch, action_space).cuda()
            target_values = get_target_values(batch).cuda()

            policy_loss = ce_loss(policies, target_policies)
            value_loss = mse_loss(values, target_values)
            loss = policy_loss + value_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        accuracy = test(valid_data, nnet, args, tb, epoch)

        if accuracy > best_score:
            best_score = accuracy
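            # persist the best checkpoint so far (assumes the models/ directory exists)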
            torch.save(nnet.module.state_dict(), 'models/pretrained.pt')

    nnet.module.load_state_dict(torch.load('models/pretrained.pt'))

    test(test_data, nnet, args, tb, args.update_epochs + 1)
Example #5
def train(output_dir, epochs=10):
    img_size = 128
    batch_size = 4

    model = get_segmentation_model(img_size)
    model.summary()  # summary() prints and returns None, so wrapping it in print() is redundant

    data = get_train_data()[:500]
    val_split = 0.1
    train_size = int(len(data) * (1 - val_split))
    train_data = data[:train_size]
    val_data = data[train_size:]

    train_generator = DataGenerator(train_data, batch_size, img_size)
    val_generator = DataGenerator(val_data, batch_size, img_size)

    sample_batch = val_generator.sample()
    display_callback = DisplayCallback(model,
                                       sample_batch,
                                       img_size,
                                       600,
                                       100,
                                       frequency=1)

    model_path = output_dir + "/weights.hdf5"
    cp = tf.keras.callbacks.ModelCheckpoint(filepath=model_path,
                                            monitor='val_loss',
                                            save_best_only=True,
                                            verbose=1)

    callbacks = [lr_schedule(), cp, display_callback]

    # fit_generator is deprecated since TF 2.1; model.fit accepts generators directly
    # (shuffle is ignored for generators, so it is dropped here)
    history = model.fit(train_generator,
                        validation_data=val_generator,
                        epochs=epochs,
                        callbacks=callbacks,
                        verbose=1,
                        use_multiprocessing=True,
                        workers=4)

    model.load_weights(model_path)
    tf.saved_model.save(model, output_dir + "/saved_model/")
    return model
Example #6
def main():
    mp.set_start_method('spawn')
    mpp.Pool.istarmap = istarmap  # for tqdm

    parser = argparse.ArgumentParser(description='Training argument parser')
    parser = load_train_args(parser)
    parser = load_test_args(parser)
    args = parser.parse_args()

    set_seeds(args.seed)

    train_data = get_train_data()
    valid_data = get_valid_data()

    nnet = create_nnet(train_data, args)
    nnet.module.load_state_dict(torch.load(f'models/{args.load}'))
    nnets = create_nnets(train_data, args, n_nnets=torch.cuda.device_count())

    optimizer = Adam(nnet.parameters(), lr=args.lr)
    policy_loss_fn = nn.KLDivLoss(reduction='batchmean')
    value_loss_fn = nn.MSELoss()

    action_space = ActionSpace()

    train_examples = deque(maxlen=args.examples_len)

    tb = SummaryWriter()  # tensorboard writer

    epoch = 0
    while True:
        for indice in random_batch(len(train_data), args.train_batch_size):
            epoch += 1
            print(f'Epoch {epoch}')

            copy_nnet(nnet, nnets)  # nnet -> nnets

            curr_examples = simulate(train_data[indice], nnets, action_space,
                                     args)
            train_examples.extend(curr_examples)

            update_net(train_examples, nnet, optimizer, policy_loss_fn,
                       value_loss_fn, args, tb, epoch)

            test(valid_data, nnet, args, tb, epoch)
Example #7
    def train_net(self, board, target_direction):
        train_data, train_targets = get_train_data(board, target_direction)

        train_data = torch.Tensor(train_data).to(self.device).float()
        train_targets = torch.Tensor(train_targets).to(self.device).long().squeeze(1)

        # if self.game.score <= 2048:
        #     y0 = self.net0.forward(train_data)
        #     loss0 = self.criterion0(y0, train_targets)
        #
        #     self.optimizer0.zero_grad()
        #     loss0.backward()
        #     self.optimizer0.step()

        if self.game.score >= 512:
            y1 = self.net1.forward(train_data)
            loss1 = self.criterion1(y1, train_targets)

            self.optimizer1.zero_grad()
            loss1.backward()
            self.optimizer1.step()
Example #8
    try:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.output_dir)
        saver.restore(sess, checkpoint_path)
        logging.debug('restore from [{0}]'.format(checkpoint_path))

    except Exception:
        logging.debug('no check point found....')

    state = sess.run(model.init_state)
    for epoch in range(10):
        logging.debug('epoch [{0}]....'.format(epoch))

        #for dl in utils.get_train_data(vocabulary, batch_size=FLAGS.batch_size, num_steps=FLAGS.num_steps):
        for step, (x, y) in enumerate(
                utils.get_train_data(len(vocabulary),
                                     raw_x,
                                     raw_y,
                                     batch_size=FLAGS.batch_size,
                                     num_steps=FLAGS.num_steps)):

            ##################
            # Your Code here
            ##################
            feed_dict = {
                model.X: x,
                model.Y: y,
                model.keep_prob: 0.5,
                model.init_state: state
            }
            gs, _, state, l, summary_string = sess.run(
                [model.global_step, model.optimizer, model.outputs_state_tensor,
                 model.loss, model.merged_summary_op], feed_dict=feed_dict)
Example #9
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
from utils import get_train_data


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(6, 36)
        self.fc2 = nn.Linear(36, 2)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.tanh(x)  # F.tanh is deprecated
        x = self.fc2(x)
        return x

    def predict(self, x):
        pred = F.softmax(self.forward(x), dim=1)
        ans = []
        for t in pred:
            print(t)
            if t[0] > t[1]:  # compare class probabilities; exact equality with 1 almost never holds
                ans.append(0)
            else:
                ans.append(1)
        return torch.tensor(ans)


data, target = get_train_data()

model = torch.load("federated_model.model")

print(accuracy_score(model.predict(data), target))
Example #10
model = LipModel()

with open(model_path, 'rb') as f:
    model.load_state_dict(torch.load(f, map_location=device))
model.eval()
model.to(device)

index2label = []
with open('dictionary/dictionary.txt', 'r', encoding='utf-8') as f:
    for word in f:
        index2label.append(word.split(',')[0])

with open('data_pkl/data_test.pkl', 'rb') as f:
    test_data = pickle.load(f)
    test_ids = pickle.load(f)
test_data, test_ids = get_train_data(test_data, test_ids, 16, test_data=True)

print('=======>> Predict')
predict_result = []
with torch.no_grad():
    for idx in range(len(test_data)):
        inputs = test_data[idx].to(device)
        possibility = model(inputs)[0]

        pred = torch.argmax(possibility, dim=-1).tolist()
        assert len(pred) == len(test_ids[idx])
        for i, ids in enumerate(test_ids[idx]):
            predict_result.append(ids + ',' + index2label[pred[i]])

with open('results/attention/att_submit_1.csv', 'w', encoding='utf-8') as f:
    for line in predict_result:
Example #11
dim = 128


def seq_maxpool(x):
    seq, mask = x
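    # push masked (padded) positions to a large negative value so K.max ignores them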
    seq -= (1 - mask) * 1e10

    return K.max(seq, 1)


id2kb, kb2id = get_kb()

print(type(id2kb))

train_data = get_train_data()
print("len(train_data): %d" % len(train_data))

id2char, char2id = get_char_dict(id2kb, train_data)
print("char number is %d", len(id2char))

random_order = get_random(train_data)

dev_data = [train_data[j] for i, j in enumerate(random_order) if i % 9 == 0]
train_data = [train_data[j] for i, j in enumerate(random_order) if i % 9 != 0]

sentence_in = Input(shape=(None, ))  # sentence to be recognized
mention_in = Input(shape=(None, ))  # entity semantic representation
left_in = Input(shape=(None, ))  # left boundary of the recognized span
right_in = Input(shape=(None, ))  # right boundary of the recognized span
y_in = Input(shape=(None, ))  # entity labels
Example #12
    try:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.output_dir)
        saver.restore(sess, checkpoint_path)
        logging.debug('restore from [{0}]'.format(checkpoint_path))

    except Exception:
        logging.debug('no check point found....')


    max_steps = len(vocabulary_int) // (FLAGS.batch_size * FLAGS.num_steps)  # steps per epoch, counted in num_steps-sized chunks
    step = 0
    for epoc in range(1):
        # logging.debug('epoch [{0}]....'.format(epoc))
        state = sess.run(model.state_tensor)  # initial RNN state
        for x, y in utils.get_train_data(vocabulary_int, batch_size=FLAGS.batch_size, num_steps=FLAGS.num_steps):
            step += 1
            feed_dict = {model.X: x, model.Y: y, model.keep_prob: 0.85, model.state_tensor: state}

            gs, _, state, l, summary_string = sess.run(
                [model.global_step, model.optimizer, model.outputs_state_tensor,
                 model.loss, model.merged_summary_op], feed_dict=feed_dict)
            summary_string_writer.add_summary(summary_string, gs)

            if gs % (max_steps // 10) == 0:
                logging.debug('step [{0}] loss [{1}]'.format(gs, l))

            if gs % (max_steps // 4) == 0:
                save_path = saver.save(sess, os.path.join(FLAGS.output_dir, "model.ckpt"), global_step=gs)

            if step >= max_steps:
                break
Example #13
                n = 0
                for batch in range(n_chunk):
                    train_loss, _, _ = sess.run([cost, last_state, train_op],
                                                feed_dict={
                                                    input_data: x_batches[n],
                                                    output_data: y_batches[n]
                                                })
                    n += 1
                    logger.info('epoch:%d batch:%d train_loss:%s' %
                                (epoch, batch, str(train_loss)))
                    if epoch % self.num_epoch_to_save_model == 0:
                        saver.save(sess,
                                   self.save_model_path,
                                   global_step=epoch)


if __name__ == "__main__":
    config = config.config
    # data preprocessing
    train_data = get_train_data(config['train_data_paths'])
    # train
    trainer = Train(train_data=train_data,
                    model_type=config['model_type'],
                    dim_nn=config['dim_nn'],
                    num_layers=config['num_layers'],
                    batch_size=config['batch_size'],
                    num_passes=config['num_passes'],
                    num_epoch_to_save_model=config['num_epoch_to_save_model'],
                    save_model_path=config['save_model_path'])
    trainer.run()
Example #14
# This code requires TensorFlow version >=1.9

import tensorflow as tf
tf.enable_eager_execution()
from sklearn.model_selection import train_test_split

import numpy as np
import os
import time

from utils import get_train_data, get_inception_model, image_to_feature, text_to_vec

from models import CNN_Encoder, RNN_Decoder, loss_function

print("Dowloading dataset")
train_captions, img_name_vector = get_train_data()

print("Dowloading pretrained InceptionV3 weights")
image_features_extract_model = get_inception_model()

print("Transforming images into features")
image_to_feature(img_name_vector, image_features_extract_model)

print("Transforming text to vectors")
tokenizer, cap_vector = text_to_vec(train_captions)

print(" Create training and validation sets using 80-20 split")
img_name_train, img_name_val, cap_train, cap_val = train_test_split(
    img_name_vector, cap_vector, test_size=0.2, random_state=0)

#########
Example #15
#!/usr/bin/env python

from numpy import exp
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
import utils
import encode as enc

best_ratio = .825
best_alpha = 0.00062

if __name__ == '__main__':
    train = utils.get_train_data([], False)
    test = utils.get_test_data(None, [])
    ncols = [
        c for c, d in zip(train.columns, train.dtypes)
        if str(d) in ['float64', 'int64']
    ]
    ncols.remove('SalePrice')
    ncols.remove('GarageYrBlt')
    ncols.remove('Id')
    ids = test.Id
    train, test = enc.fix_categorical(train.drop(utils.TEST_IGNORE, axis=1),
                                      test.drop(utils.TEST_IGNORE, axis=1),
                                      option='one_hot')
    enc.fix_numeric(train, test, ncols, scaling='uniform')

    clf3 = GradientBoostingRegressor()
    cv = utils.run_cross_val(clf3, train)
    print("GradientBoosting")
Example #16
xx = np.linspace(0, 10, 100)
    ax.plot(xx, model.predict(xx[:, np.newaxis]), "orange")


# Quadratic error
def quadratic_error(y_true, y_pred):
    return np.mean((y_true - y_pred)**2)


# Absolute error
def absolute_error(y_true, y_pred):
    return np.mean(np.abs(y_true - y_pred))


# Create and reshape the data
x_train, y_train = utils.get_train_data()
x_train = x_train[:, np.newaxis]

x_test, y_test = utils.get_test_data()
x_test = x_test[:, np.newaxis]

# Linear regression
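# lr is assumed to be sklearn.linear_model.LinearRegression, imported under this alias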
model = lr()
model.fit(x_train, y_train)

### Compute errors
y_pred_train = model.predict(x_train)
y_pred_test = model.predict(x_test)

# Training error
quadratic_train_error = quadratic_error(y_train, y_pred_train)
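
The snippet breaks off here; a natural continuation (an assumption, mirroring the training-error line above) would compute the remaining error terms:

# Hypothetical continuation
quadratic_test_error = quadratic_error(y_test, y_pred_test)
absolute_train_error = absolute_error(y_train, y_pred_train)
absolute_test_error = absolute_error(y_test, y_pred_test)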
Example #17
 print("X")
 print(X[0])
 print("Y")
 print(Y[0])
 print("t")
 print(w2idx)
 print("Index to word2vec")
 print(idx2w)
 # seqlen = X.shape[0]
 #
 # create the model
 model = LSTM_rnn(state_size=15, num_classes=len(idx2w))
 # to train or to generate?
 if args['train']:
     # get train set
     train_set = utils.get_train_data(x, xq, y, batch_size=BATCH_SIZE)
     print(train_set)
     # for i in train_set:
     #     print(i)
     #     break
     #
     # start training
     model.train(train_set)
 elif args['generate']:
     # call generate method
     text = model.generate(
         idx2w,
         w2idx,
         num_words=args['num_words'] if args['num_words'] else 100,
         separator='')
     #########
Example #18
import json
import logging
import os

import tensorflow as tf

import utils
from model import Model
from utils import read_data

from flags import parse_args

FLAGS, unparsed = parse_args()

vocabulary = read_data(FLAGS.text)
print('Data size', len(vocabulary))

with open('./dictionary.json', encoding='utf-8') as inf:
    dictionary = json.load(inf)  # json.load() no longer accepts an encoding argument in Python 3

with open('./reverse_dictionary.json', encoding='utf-8') as inf:
    reverse_dictionary = json.load(inf)

for x, y in utils.get_train_data(vocabulary,
                                 dictionary,
                                 batch_size=128,
                                 num_steps=32):
    print('=-=-=-=-=-x=-=-=-=-=-=-', x)
    print('=-=-=-=-=-y=-=-=-=-=-=-', y)
Example #19
    net = resnet.resnet18(n_channels=1, n_classes=2)

    #args.load = "checkpoints/0402CP120.pth"
    if args.load:
        net.load_state_dict(torch.load(args.load))
        print('Model loaded from {}'.format(args.load))

    if args.gpu:
        net.cuda()
        #net = torch.nn.DataParallel(net, device_ids=args.gpu).cuda()
        # cudnn.benchmark = True # faster convolutions, but more memory

    input_path = '../data/train_feature.npy'
    target_path = '../data/train_social_label.npy'

    train_set = get_train_data(input_path, target_path)
    test_set = get_test_data(input_path, target_path)

    train_data_loader = DataLoader(dataset=train_set,
                                   num_workers=4,
                                   batch_size=args.batchsize,
                                   shuffle=True)
    test_data_loader = DataLoader(dataset=test_set,
                                  num_workers=4,
                                  batch_size=args.batchsize,
                                  shuffle=False)

    # predit(net=net,
    #           train_loader = train_data_loader,
    #           val_loader = test_data_loader,
    #           epochs=args.epochs,
Example #20
vocabulary = read_data(FLAGS.text)
print('Data size', len(vocabulary))

# with open(FLAGS.dictionary, encoding='utf-8') as inf:
# dictionary = json.load(inf, encoding='utf-8')

# with open(FLAGS.reverse_dictionary, encoding='utf-8') as inf:
# reverse_dictionary = json.load(inf, encoding='utf-8')

# data, count, dictionary, reverse_dictionary = build_dataset(vocabulary, 10000)
# data, count, dictionary, reverse_dictionary = build_dataset(vocabulary, len(count))
# reverse_list = [reverse_dictionary[i]
# for i in range(len(reverse_dictionary))]

batches, vol_len = utils.get_train_data(vocabulary,
                                        batch_size=FLAGS.batch_size,
                                        seq_length=FLAGS.num_steps)

logging.debug(vol_len)
model = Model(learning_rate=FLAGS.learning_rate,
              batch_size=FLAGS.batch_size,
              num_steps=FLAGS.num_steps,
              num_words=vol_len,
              rnn_layers=FLAGS.rnn_layers)
model.build()

with tf.Session() as sess:
    summary_string_writer = tf.summary.FileWriter(FLAGS.output_dir, sess.graph)

    saver = tf.train.Saver(max_to_keep=5)
    sess.run(tf.global_variables_initializer())
Example #21
@author: Rajkumar
"""

import numpy as np
import utils
from skimage.io import imsave
from skimage.color import rgb2lab, lab2rgb
from Net import Net
from keras.models import load_model
from keras.models import model_from_json

# Define Train Images Path
TRAIN_FOLDER = 'Train/'

# Get Train Data.
TRAIN_DATA = utils.get_train_data(TRAIN_FOLDER)
TRAIN_DATA_SIZE = len(TRAIN_DATA)

# Get the CNN model
net = Net(train=True)
CNN = net.encode()

# Define BatchSize
BATCH_SIZE = 50

if BATCH_SIZE < TRAIN_DATA_SIZE:
    steps = TRAIN_DATA_SIZE // BATCH_SIZE  # integer number of steps per epoch
else:
    steps = 1

#########################################
Example #22
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    logging.debug('Initialized')

    try:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.output_dir)
        saver.restore(sess, checkpoint_path)
        logging.debug('restore from [{0}]'.format(checkpoint_path))

    except Exception:
        logging.debug('no check point found....')

    for x in range(1):
        logging.debug('epoch [{0}]....'.format(x))
        state = sess.run(model.state_tensor)
        for step, (X, Y) in enumerate(utils.get_train_data(vocabulary, batch_size=FLAGS.batch_size, num_steps=FLAGS.num_steps)):
            ###################
            # My Code Start
            ###################
            feed_dict = {model.X: X, model.Y: Y, model.state_tensor: state}
            ###################
            # My Code End
            ###################
            gs, _, state, l, summary_string = sess.run(
                [model.global_step, model.optimizer, model.outputs_state_tensor, model.loss, model.merged_summary_op], feed_dict=feed_dict)
            summary_string_writer.add_summary(summary_string, gs)

            if gs % 10 == 0:
                logging.debug('step [{0}] loss [{1}]'.format(gs, l))
                save_path = saver.save(sess, os.path.join(
                    FLAGS.output_dir, "model.ckpt"), global_step=gs)
Example #23
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import get_train_data
from sklearn.metrics import accuracy_score

X, y = get_train_data()


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(6, 36)
        self.fc2 = nn.Linear(36, 2)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.tanh(x)  # F.tanh is deprecated
        x = self.fc2(x)
        return x

    def predict(self, x):
        pred = F.softmax(self.forward(x), dim=1)
        ans = []
        for t in pred:
            if t[0] > t[1]:
                ans.append(0)
            else:
                ans.append(1)
        return torch.tensor(ans)
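
A minimal usage sketch for this Net (an assumption, not from the original source: X is array-like with 6 features per row and y holds the matching integer labels):

# Hypothetical evaluation of the (untrained) network
model = Net()
inputs = torch.tensor(X, dtype=torch.float32)
preds = model.predict(inputs)
print(accuracy_score(y, preds.numpy()))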
Example #24
        return accuracy, confusion_matrix

    # get most/least prototypical
    def get_prototypical(self):
        return self.most_prototypical, self.least_prototypical

    # get odds ratio heatmap
    def get_odds_ratio(self, a, b):
        a_map = (self.black_count[a] + self.laplace_smoothing) / (self.digit_count[a] + 2.0 * self.laplace_smoothing)
        b_map = (self.black_count[b] + self.laplace_smoothing) / (self.digit_count[b] + 2.0 * self.laplace_smoothing)
        odds_map = a_map / b_map
        return np.log(a_map), np.log(b_map), np.log(odds_map)

if __name__ == "__main__":
    train_images, train_numbers = utils.get_train_data()
    test_images, test_numbers = utils.get_test_data()

    # train here
    classifier = SinglePixelClassifier(train_images, train_numbers)

    # classify here
    accuracy, confusion_matrix = classifier.evaluate(test_images, test_numbers)

    print("\nAccuracy over all of test data: {:.2%}".format(accuracy))

    print("\nConfusion matrix: row - truth label, column - classifier output")
    print(np.around(confusion_matrix, 3))

    most_prototypical, least_prototypical = classifier.get_prototypical()
Example #25
    try:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.output_dir)
        saver.restore(sess, checkpoint_path)
        logging.debug('restore from [{0}]'.format(checkpoint_path))

    except Exception:
        logging.debug('no check point found....')

    init_state = sess.run(model.state_tensor)
    for x in range(1):
        logging.debug('epoch [{0}]....'.format(x))

        state = init_state

        for batch_input, batch_labels in utils.get_train_data(vocabulary, batch_size=FLAGS.batch_size, num_steps=FLAGS.num_steps):

            ##################
            # Your Code here
            ##################

            # model.state_tensor = state   # assigning directly raised an error locally; pass the state via feed_dict instead
            feed_dict = {
                model.X: batch_input,
                model.Y: batch_labels,
                model.keep_prob: FLAGS.keep_prob,
                model.state_tensor: state
            }
            gs, _, state, l, summary_string = sess.run(
                [model.global_step, model.optimizer, model.outputs_state_tensor, model.loss, model.merged_summary_op], feed_dict=feed_dict)
            summary_string_writer.add_summary(summary_string, gs)
Example #26
def train(fea_file, pos_file, neg_file):
  X, y = get_train_data(pos_file, neg_file, fea_file)
  model = linear_model.LogisticRegression(C=0.1, penalty='l2')

  model.fit(X, y)
  return model
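
A hedged follow-up sketch (file names and X_new are placeholders, not from the original source):

# Hypothetical scoring with the trained classifier
model = train("features.txt", "positives.txt", "negatives.txt")
probs = model.predict_proba(X_new)[:, 1]  # probability of the positive class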
Example #27
    #         datas[index_mid + i] = datas[i]
    #         datas[i] = tmp_data
    #         labels[index_mid + i] = labels[i]
    #         labels[i] = tmp_label
    #     remain_mid = int(int(len(datas) - index) / 2)
    #     for i in range(remain_mid):
    #         tmp_data = datas[index + i]
    #         datas[index + i] = datas[index + remain_mid + i]
    #         datas[index + remain_mid + i] = tmp_data
    #         tmp_label = labels[index + i]
    #         labels[index + i] = labels[index + remain_mid + i]
    #         labels[index + remain_mid + i] = tmp_label
    #     return datas, labels


word_train, word_test, res_train, res_test = utils.get_train_data()

bp = BPNetwork()
if __name__ == '__main__':
    '''
    Initialize the network structure:
    input layer: 28 * 28 = 784
    output layer: 12
    '''
    bp.setup(784, 12, [20])
    # initialize the learning rate and the number of training iterations
    learn = 0.1
    times = 50
    print("训练开始: " + datetime.datetime.now().strftime('%Y.%m.%d-%H:%M:%S'))

    bp.train(word_train, res_train, learn, times)
Example #28
def train():
    print("=====>> Select GPU")
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # TODO: adjust as needed
    random.seed(3020)
    val_number = 1200

    print("=====>> Parameter")
    train_path = 'data_pkl/data_train.pkl'

    batch_size = 16
    epochs = 10
    device = 'cuda:0'
    lr = 0.0005

    model = LipModel()
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    print("======>> load data")
    with open(train_path, 'rb') as f:
        train_data = pickle.load(f)
        label_data = pickle.load(f)
    print("======>> cut data to get val data")
    train_data, label_data, val_data, val_label = cut_data(train_data, label_data, val_number)
    print("======>> Generate torch train data")
    train_data, label_data = get_train_data(train_data, label_data, batch_size)
    val_data, val_label = get_train_data(val_data, val_label, batch_size)

    print("=======>> Start Training Model")
    best_acc = 0
    pred_re = []
    true_re = []
    for epoch in range(epochs):
        avg_loss = AverageMeter()
        # random batch
        data_lists = list(range(len(train_data)))
        random.shuffle(data_lists)
        model.train()
        for step, ids in tqdm(enumerate(data_lists)):
            inputs = train_data[ids].to(device)
            labels = label_data[ids].to(device)

            possibility, loss = model(inputs, labels)
            possibility = torch.argmax(possibility, dim=-1)
            loss = loss.mean()
            pred_re.append(possibility)
            true_re.append(labels)
            avg_loss.update(loss)
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
            optimizer.step()
            step += 1
        print("Number of Epoch:" + str(epoch), end='\t')
        print("Current Avg loss:{0:.6f}".format(avg_loss.avg))
        pred_re = []
        true_re = []
        print("=====> Start Val")
        model.eval()
        acc = 0
        count = 0
        avg_loss_val = AverageMeter()
        with torch.no_grad():
            for ids in tqdm(range(len(val_data))):
                inputs = val_data[ids].to(device)
                labels = val_label[ids].to(device)
                possibility, loss = model(inputs, labels)
                avg_loss_val.update(loss.mean().item())
                count += inputs.size(0)
                acc += torch.sum(torch.eq(torch.argmax(possibility, dim=-1), labels)).item()
        acc = acc / count
        print("Current Val Loss: {0:.6f}  Accuracy: {1:.6f}".format(avg_loss_val.avg, acc))

        if acc >= best_acc:
            torch.save(model.state_dict(), "weights/attention/attention_net_{}.pt".format(epoch))
            best_acc = acc
            print("Saved Model, epoch = {0}".format(epoch))
Example #29
        predictions: numpy array of predictions for test_set
    """
    predictions = []
    for img in test_set:
        result = np.dot(img, w) + b
        result = result > 0

        predictions.append(result)

    predictions = np.array(predictions)
    return predictions


########## Get inputs and labels
read_start = time.time()
labels, inputs = get_train_data("../data/train_binary.csv")
inputs = get_hog_feature(inputs)
####### NOTE:
###   labels: 42000
###   inputs: (42000, 324)
####### END NOTE
########## END Get inputs and labels

########## Split into train and test set
train_inputs, test_inputs, train_labels, test_labels = train_test_split(
    inputs, labels, test_size=0.3, random_state=2333)
read_end = time.time()
########## END Split into train and test set

########## read_data log
print("read data finished!!")
Example #30
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    logging.debug('Initialized')

    try:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.output_dir)
        saver.restore(sess, checkpoint_path)
        logging.debug('restore from [{0}]'.format(checkpoint_path))

    except Exception:
        logging.debug('no check point found....')

    for x in range(1):
        logging.debug('epoch [{0}]....'.format(x))
        state = sess.run(model.state_tensor)
        for dl in utils.get_train_data(vocabulary, batch_size=FLAGS.batch_size, num_steps=FLAGS.num_steps):

            ##################
            # Your Code here
            ##################
            x = index_data(dl[0], dictionary)
            y = index_data(dl[1], dictionary)
            feed_dict = {model.X: x, model.Y: y, model.keep_prob: 0.8}

            gs, _, state, l, summary_string = sess.run(
                [model.global_step, model.optimizer, model.outputs_state_tensor, model.loss, model.merged_summary_op], feed_dict=feed_dict)
            summary_string_writer.add_summary(summary_string, gs)

            if gs % 10 == 0:
                logging.debug('step [{0}] loss [{1}]'.format(gs, l))
                save_path = saver.save(sess, os.path.join(
                    FLAGS.output_dir, "model.ckpt"), global_step=gs)
Example #31
import numpy as np
from sklearn import tree
import utils

X, Y = utils.get_train_data()

clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, Y)

test_images, test_labels = utils.get_test_data()

num_correct = 0
confusion_matrix = np.zeros((10, 10))
guesses = clf.predict(test_images)
for i in range(len(test_images)):
    guess = guesses[i]
    real = test_labels[i]

    if guess == real:
        num_correct += 1

    confusion_matrix[real][guess] += 1

for i in range(10):
    confusion_matrix[i] /= np.sum(confusion_matrix[i])
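# each row of the confusion matrix now sums to 1: rows are true digits, columns are predicted digits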
accuracy = num_correct / len(test_labels)

print("Accuracy: " + str(accuracy))
print(np.around(confusion_matrix, 3))