Code Example #1
def __init__(self):
    self.n_games = 0
    self.random_game = 50  # epsilon-greedy exploration threshold (in games)
    self.gamma = 0.9  # discount rate
    self.memory = deque(maxlen=MAX_MEMORY)  # oldest entries are evicted (popleft) when full
    self.model = NN()
    self.trainer = Train(self.model, gamma=self.gamma)
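A quick aside on the `deque(maxlen=MAX_MEMORY)` replay memory above: once the deque is full, each `append` silently evicts the oldest experience from the left, which is what the `popleft` comment alludes to. A minimal sketch (the capacity of 5 is illustrative):

from collections import deque

memory = deque(maxlen=5)   # bounded replay buffer
for step in range(8):
    memory.append(step)    # beyond capacity, the oldest entry is dropped
print(list(memory))        # [3, 4, 5, 6, 7]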
Code Example #2
    def __init__(self,
                 pop_size,
                 rows,
                 randomize_weights=True,
                 input_file=None):
        # layer layout of each NN
        layers = [24, 16, 16, 4]

        # calculate the number of genes in a dna strand
        self.dna_size = 0
        for i in range(1, len(layers)):
            self.dna_size += layers[i - 1] * layers[i]

        # initialize the population
        self.population = np.empty(pop_size, dtype=object)  # object array holding NN instances
        for i in range(pop_size):
            self.population[i] = NN(layers, self.dna_size)

        # insert the given weights if the user so chooses
        if not randomize_weights:
            # load from the user-supplied file (falls back to data.csv)
            weights_file = input_file if input_file is not None else 'data.csv'
            self.population[0].set_weights(
                np.loadtxt(weights_file, delimiter=','))

        # size of the field
        self.rows = rows
Code Example #3
def predict(X):
    nn = NN([6, 8, 4, 1])
    nn.load_state_dict(torch.load('model_state/state'))
    nn.eval()
    result = nn(X).detach().numpy().reshape(-1, 1)  # call the module directly rather than .forward()
    result_transformed = scaler.inverse_transform(result)  # undo the training-time scaling
    return result_transformed
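Note that `nn.eval()` only switches layers such as dropout and batch norm to inference mode; it does not disable gradient tracking, which is why the snippet needs `.detach()`. Wrapping the forward pass in `torch.no_grad()` is the more common idiom. A minimal sketch under the same assumptions (an `NN([6, 8, 4, 1])` that accepts a float tensor of shape `(n, 6)`):

import torch

def predict_no_grad(model, X):
    model.eval()
    with torch.no_grad():      # no autograd bookkeeping during inference
        result = model(X)      # calling the module invokes forward()
    return result.numpy().reshape(-1, 1)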
Code Example #4
def train(self, epochs=10):
    model = NN(batch_size=30)
    opt = optim.Adam(model.parameters(), lr=0.005)
    criterion = nn.BCELoss()
    print(self.labels.shape)
    for epoch in range(epochs):
        item_loss = 0
        for i, (feat, lab) in enumerate(self.dataloader):
            feat = feat[0][:, :1, :, :]
            output = model(feat)
            loss = criterion(output, lab.view(30))
            opt.zero_grad()  # clear gradients left over from the previous step
            loss.backward()
            opt.step()
            item_loss += loss.item()
        print("LOSS ---->", item_loss / len(self.dataloader))
    torch.save(model.state_dict(), "1")
Code Example #5
def net_loader(self, path=None):
    testm = NN(self.Layer_s).to(device)
    if path is None:
        model_name = self.part_number + '_model_lr_' + str(self.Learning_r) + '_layer_size_' + str(self.Layer_s) + '.pt'
        path = os.path.join(self.checkpoint_dir, model_name)
        testm.load_state_dict(torch.load(path))
        print(model_name, ' was loaded successfully.\t\t\t [loaded]')
    else:
        testm.load_state_dict(torch.load(path))
        print(path, ' was loaded successfully from the given path.\t\t\t [loaded from Path]')
    return testm
Code Example #6
def train_model(DATA_LENGTH, num_file):
    # Read the input files
    x_train = pd.read_csv("x_train.csv")
    x_test = pd.read_csv("x_test.csv")
    y_train = pd.read_csv("y_train.csv")
    y_test = pd.read_csv("y_test.csv")
    # Extract the DataFrame values into a 2-D matrix (row index = sample,
    # column index = feature) that can be fed directly to the algorithm
    x_train = x_train.iloc[:, :].values.reshape(-1, DATA_LENGTH)
    x_test = x_test.iloc[:, :].values.reshape(-1, DATA_LENGTH)
    y_train = y_train.iloc[:, :].values.reshape(-1, num_file)
    y_test = y_test.iloc[:, :].values.reshape(-1, num_file)

    fnn_layers = [600, 500, 400, 300, 200, 100, 50, 11]

    my_neural_network = NN(fnn_layers=fnn_layers,
                           input_dim=DATA_LENGTH,
                           output_dim=num_file,
                           batch_size=100,
                           act=tf.nn.relu,
                           learning_rate=0.01,
                           keep_rate=0.05)

    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
    feed_train = {
        my_neural_network.x: x_train,
        my_neural_network.y: y_train,
        my_neural_network.model: 'train'
    }
    feed_test = {
        my_neural_network.x: x_test,
        my_neural_network.y: y_test,
        my_neural_network.model: 'test'
    }
    epochs = 1000
    with tf.Session() as sess:
        sess.run(init)
        for i in range(epochs):
            error, _ = sess.run(
                [my_neural_network.loss, my_neural_network.train_op],
                feed_dict=feed_train)
            if i % 10 == 0:
                print('train loss', error)
                acc = sess.run(my_neural_network.accuracy,
                               feed_dict=feed_test)
                print('test accuracy', acc)
Code Example #7
def main(option):
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    )

    dataset = HardSimilarityDataset()
    if option.dataset_cache is None:
        glove = Glove(option.emb_file)
        logging.info('Embeddings loaded')
        dataset.load(option.dataset_file, glove)
    else:
        dataset.load_cache(option.dataset_cache)
    logging.info('Dataset loaded')

    embeddings = nn.Embedding(option.vocab_size, option.emb_dim, padding_idx=1)
    if option.model == 'NTN':
        model = NeuralTensorNetwork(embeddings, option.em_k)
    elif option.model == 'LowRankNTN':
        model = LowRankNeuralTensorNetwork(embeddings, option.em_k,
                                           option.em_r)
    elif option.model == 'RoleFactor':
        model = RoleFactoredTensorModel(embeddings, option.em_k)
    elif option.model == 'Predicate':
        model = PredicateTensorModel(embeddings)
    elif option.model == 'NN':
        model = NN(embeddings, 2 * option.em_k, option.em_k)
    elif option.model == 'EMC':
        model = EMC(embeddings, 2 * option.em_k, option.em_k)
    else:
        logging.info('Unknown model type: ' + option.model)
        exit(1)

    checkpoint = torch.load(option.model_file, map_location='cpu')
    if type(checkpoint) == dict:
        if 'event_model_state_dict' in checkpoint:
            state_dict = checkpoint['event_model_state_dict']
        else:
            state_dict = checkpoint['model_state_dict']
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)
    logging.info(option.model_file + ' loaded')

    # embeddings = nn.Embedding(option.vocab_size, option.emb_dim, padding_idx=1)
    # embeddings.weight.data = torch.from_numpy(glove.embd).float()
    # model = Averaging(embeddings)

    if option.use_gpu:
        model.cuda()
    model.eval()

    data_loader = torch.utils.data.DataLoader(
        dataset,
        collate_fn=HardSimilarityDataset_collate_fn,
        shuffle=False,
        batch_size=len(dataset))
    batch = next(iter(data_loader))
    pos_e1_subj_id, pos_e1_subj_w, pos_e1_verb_id, pos_e1_verb_w, pos_e1_obj_id, pos_e1_obj_w, \
    pos_e2_subj_id, pos_e2_subj_w, pos_e2_verb_id, pos_e2_verb_w, pos_e2_obj_id, pos_e2_obj_w, \
    neg_e1_subj_id, neg_e1_subj_w, neg_e1_verb_id, neg_e1_verb_w, neg_e1_obj_id, neg_e1_obj_w, \
    neg_e2_subj_id, neg_e2_subj_w, neg_e2_verb_id, neg_e2_verb_w, neg_e2_obj_id, neg_e2_obj_w = batch

    if option.use_gpu:
        pos_e1_subj_id = pos_e1_subj_id.cuda()
        pos_e1_subj_w = pos_e1_subj_w.cuda()
        pos_e1_verb_id = pos_e1_verb_id.cuda()
        pos_e1_verb_w = pos_e1_verb_w.cuda()
        pos_e1_obj_id = pos_e1_obj_id.cuda()
        pos_e1_obj_w = pos_e1_obj_w.cuda()
        pos_e2_subj_id = pos_e2_subj_id.cuda()
        pos_e2_subj_w = pos_e2_subj_w.cuda()
        pos_e2_verb_id = pos_e2_verb_id.cuda()
        pos_e2_verb_w = pos_e2_verb_w.cuda()
        pos_e2_obj_id = pos_e2_obj_id.cuda()
        pos_e2_obj_w = pos_e2_obj_w.cuda()
        neg_e1_subj_id = neg_e1_subj_id.cuda()
        neg_e1_subj_w = neg_e1_subj_w.cuda()
        neg_e1_verb_id = neg_e1_verb_id.cuda()
        neg_e1_verb_w = neg_e1_verb_w.cuda()
        neg_e1_obj_id = neg_e1_obj_id.cuda()
        neg_e1_obj_w = neg_e1_obj_w.cuda()
        neg_e2_subj_id = neg_e2_subj_id.cuda()
        neg_e2_subj_w = neg_e2_subj_w.cuda()
        neg_e2_verb_id = neg_e2_verb_id.cuda()
        neg_e2_verb_w = neg_e2_verb_w.cuda()
        neg_e2_obj_id = neg_e2_obj_id.cuda()
        neg_e2_obj_w = neg_e2_obj_w.cuda()

    pos_e1_emb = model(pos_e1_subj_id, pos_e1_subj_w, pos_e1_verb_id,
                       pos_e1_verb_w, pos_e1_obj_id, pos_e1_obj_w)
    pos_e2_emb = model(pos_e2_subj_id, pos_e2_subj_w, pos_e2_verb_id,
                       pos_e2_verb_w, pos_e2_obj_id, pos_e2_obj_w)
    neg_e1_emb = model(neg_e1_subj_id, neg_e1_subj_w, neg_e1_verb_id,
                       neg_e1_verb_w, neg_e1_obj_id, neg_e1_obj_w)
    neg_e2_emb = model(neg_e2_subj_id, neg_e2_subj_w, neg_e2_verb_id,
                       neg_e2_verb_w, neg_e2_obj_id, neg_e2_obj_w)

    if option.distance_metric == 'cosine':
        distance_func = cosine_distance
    elif option.distance_metric == 'euclid':
        distance_func = euclid_distance
    pos_dist = distance_func(pos_e1_emb, pos_e2_emb)
    neg_dist = distance_func(neg_e1_emb, neg_e2_emb)
    num_correct = (pos_dist < neg_dist).sum().item()
    accuracy = num_correct / len(dataset)

    if option.output_file.strip() != '':
        output_file = open(option.output_file, 'w')
        for i, j, k in zip(pos_dist, neg_dist, (pos_dist < neg_dist)):
            output_file.write(' '.join(
                [str(i.item()), str(j.item()), str(k.item())]) + '\n')
        output_file.close()
        logging.info('Output saved to ' + option.output_file)

    logging.info('Num correct: %d' % (num_correct, ))
    logging.info('Num total: %d' % (len(dataset), ))
    logging.info('Accuracy: %.4f' % (accuracy, ))

    if option.output_vectors is not None:
        vectors = torch.stack([pos_e1_emb, pos_e2_emb, neg_e1_emb, neg_e2_emb],
                              dim=1).cpu()
        torch.save(vectors, option.output_vectors)
        print('Vectors saved to %s' % (option.output_vectors, ))
Code Example #8
File: run.py Project: aaai2020-8186/submission
import torch
from torch.optim import Adam
from trainer import TradeComm
from game import Game
import numpy as np
import argparse

# NN and Agent are assumed to be imported from the project's own modules;
# their import statements were not part of the original snippet.

parser = argparse.ArgumentParser()
parser.add_argument("--jobnum", default=0)
args = parser.parse_args()

num_items = 15
num_utterances = 15
input_size = 2 * num_items + 2 * num_utterances
hidden_size = 256
num_samples = 10000
epsilon = 1 / 10
policy_weight = 1 / 5
horizon = 2000
write_every = 10
lr = 1e-4
directory = 'results'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

nn = NN(input_size, hidden_size, num_items, num_utterances)
opt = Adam(nn.parameters(), lr=lr)
g = Game(num_items, num_utterances)
agent = Agent(num_items, num_utterances, nn, opt, num_samples, epsilon,
              policy_weight, device)
trainer = TradeComm(g, agent, directory, args.jobnum)
trainer.run(horizon, write_every)
Code Example #9
# The snippet assumes scikit-learn's Iris dataset and the standard imports
# below (reconstructed; they were not part of the original fragment).
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris

iris_data = load_iris()
target = iris_data.target
t = np.array(target)[:, np.newaxis]

# Use only the attributes "Petal Length and Petal Width"
X = iris_data.data[:, 2:]
plt.figure(2, figsize=(8, 6))
plt.clf()

# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=t.flatten())
plt.xlabel('Petal length')
plt.ylabel('Petal width')
plt.title('Real Data', fontsize=16)
plt.show()

# Build neural network
model = NN()
model.add_layer(units=10, activation=np.tanh, initialization=np.random.normal)
model.add_layer(units=5, activation=np.tanh, initialization=np.random.normal)
model.add_layer(units=2, activation=np.tanh, initialization=np.random.normal)
model.add_layer(1, activation=model.sigmoid)

model.train(X, t, iterations=5000, learn_rate=0.0001)  # 5000
prediction = model.predict(X)

# Plot error
plt.plot(model.history['epoch'],
         model.history['error_train'],
         label='Training error')
plt.xlabel('Epoch')
plt.ylabel('Error')
plt.legend()
plt.show()
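Since the last layer is a sigmoid, `model.predict(X)` presumably returns probabilities in [0, 1]. Turning them into hard labels and an accuracy score is a one-liner; a sketch (the 0.5 threshold is the usual convention, not something the snippet specifies):

import numpy as np

labels = (prediction >= 0.5).astype(int)  # hypothetical thresholding of the sigmoid outputs
print('training accuracy:', np.mean(labels == t))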
Code Example #10
def main(option):
    logging.basicConfig(
        stream=sys.stdout,
        level=logging.DEBUG,
        format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    )

    dataset = TransitiveSentenceSimilarityDataset()
    if option.dataset_cache is None:
        glove = Glove(option.emb_file)
        logging.info('Embeddings loaded')
        dataset.load(option.dataset_file, glove)
    else:
        dataset.load_cache(option.dataset_cache)
    logging.info('Dataset loaded')

    embeddings = nn.Embedding(option.vocab_size, option.emb_dim, padding_idx=1)
    if option.model == 'NTN':
        model = NeuralTensorNetwork(embeddings, option.em_k)
    elif option.model == 'LowRankNTN':
        model = LowRankNeuralTensorNetwork(embeddings, option.em_k,
                                           option.em_r)
    elif option.model == 'RoleFactor':
        model = RoleFactoredTensorModel(embeddings, option.em_k)
    elif option.model == 'Predicate':
        model = PredicateTensorModel(embeddings)
    elif option.model == 'NN':
        model = NN(embeddings, 2 * option.em_k, option.em_k)
    elif option.model == 'EMC':
        model = EMC(embeddings, 2 * option.em_k, option.em_k)
    else:
        logging.info('Unknown model type: ' + option.model)
        exit(1)

    checkpoint = torch.load(option.model_file, map_location='cpu')
    if type(checkpoint) == dict:
        if 'event_model_state_dict' in checkpoint:
            state_dict = checkpoint['event_model_state_dict']
        else:
            state_dict = checkpoint['model_state_dict']
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)
    logging.info(option.model_file + ' loaded')

    # embeddings = nn.Embedding(option.vocab_size, option.emb_dim, padding_idx=1)
    # embeddings.weight.data = torch.from_numpy(glove.embd).float()
    # model = Averaging(embeddings)

    if option.use_gpu:
        model.cuda()
    model.eval()

    data_loader = torch.utils.data.DataLoader(
        dataset,
        collate_fn=TransitiveSentenceSimilarityDataset_collate_fn,
        shuffle=False,
        batch_size=len(dataset))
    batch = next(iter(data_loader))
    e1_subj_id, e1_subj_w, e1_verb_id, e1_verb_w, e1_obj_id, e1_obj_w, \
    e2_subj_id, e2_subj_w, e2_verb_id, e2_verb_w, e2_obj_id, e2_obj_w, \
    gold = batch

    if option.use_gpu:
        e1_subj_id = e1_subj_id.cuda()
        e1_subj_w = e1_subj_w.cuda()
        e1_verb_id = e1_verb_id.cuda()
        e1_verb_w = e1_verb_w.cuda()
        e1_obj_id = e1_obj_id.cuda()
        e1_obj_w = e1_obj_w.cuda()
        e2_subj_id = e2_subj_id.cuda()
        e2_subj_w = e2_subj_w.cuda()
        e2_verb_id = e2_verb_id.cuda()
        e2_verb_w = e2_verb_w.cuda()
        e2_obj_id = e2_obj_id.cuda()
        e2_obj_w = e2_obj_w.cuda()

    e1_emb = model(e1_subj_id, e1_subj_w, e1_verb_id, e1_verb_w, e1_obj_id,
                   e1_obj_w)
    e2_emb = model(e2_subj_id, e2_subj_w, e2_verb_id, e2_verb_w, e2_obj_id,
                   e2_obj_w)

    if option.distance_metric == 'cosine':
        distance_func = cosine_distance
    elif option.distance_metric == 'euclid':
        distance_func = euclid_distance

    pred = -distance_func(e1_emb, e2_emb)

    if option.use_gpu:
        pred = pred.cpu()
    pred = pred.detach().numpy()
    gold = gold.numpy()
    spearman_correlation, spearman_p = scipy.stats.spearmanr(pred, gold)

    if option.output_file.strip() != '':
        output_file = open(option.output_file, 'w')
        for score in pred:
            output_file.write(str(score) + '\n')
        output_file.close()
        logging.info('Output saved to ' + option.output_file)

    logging.info('Spearman correlation: %.4f' % (spearman_correlation, ))
Code Example #11
def main():
    train_loader = Data(train_path, shuffle=True, split=0.2)
    pca = train_loader.dataloading(batch_size=batch_size,
                                   pca=PCA(n_components=pca_components))
    test_loader = Data(test_path, shuffle=False, test=True)
    test_loader.dataloading(pca=pca)

    model = NN(in_dim=pca_components + 1,
               n_cls=3,
               neurons=[256],
               lr=lr,
               hidden_activation='sigmoid',
               load_weight=False,
               save_weight=False)
    info = {
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': [],
        'EPOCHS': EPOCHS
    }
    for epoch in range(EPOCHS):
        cache = np.zeros(4)
        itr = 0
        for train_data in train_loader:
            x_train, y_train, x_val, y_val = train_data
            y_train = to_onehot(y_train, 3)
            y_val = to_onehot(y_val, 3)
            train_loss, train_acc = model.train(x_train, y_train)
            val_loss, val_acc, pred = model.predict(x_val, y_val)
            cache[0] += train_loss
            cache[1] += train_acc
            cache[2] += val_loss
            cache[3] += val_acc
            itr += 1
        cache /= itr
        info['train_loss'].append(cache[0])
        info['train_acc'].append(cache[1])
        info['val_loss'].append(cache[2])
        info['val_acc'].append(cache[3])
        print(
            'EPOCH:{:05d}/{:05d}  train_loss: {:.5f}  train_acc: {:.4f}  val_loss: {:.5f}  val_acc: {:.4f}'
            .format(epoch + 1, EPOCHS, *cache.tolist()))
    plot_info(**info)
    train_loader.set_batch_size(2000)
    for train_data in train_loader:
        x_train, y_train, x_val, y_val = train_data
        y_train = to_onehot(y_train, 3)
        plot_decision_region(x_train,
                             y_train,
                             model,
                             train_loader.CLASSES,
                             title='training')

    for test_data in test_loader:
        x_test, y_test = test_data
        y_test = to_onehot(y_test, 3)
        plot_decision_region(x_test,
                             y_test,
                             model,
                             train_loader.CLASSES,
                             title='testing')
        test_loss, test_acc, pred = model.predict(x_test, y_test)
    print('test_loss: {:.5f} test_acc: {:.4f}'.format(test_loss, test_acc))
Code Example #12
            #            act_funcs=[tf.nn.relu for _ in range(layer)],
            #            batch_size=BATCH_SIZE,
            #            learning_rate=lr,
            #            test_samples=test_samples,
            #            dropout=True,
            #            batch_norm=False)
            #
            # fd = 'nn_d/{}l_{}n_{}lr/{}'.format(layer, node, lr, i)
            # losses = train_model(model, train_samples, fd)
            # loss_data[fd].append(losses)

            # training on a generic neural network model with batch norm
            model = NN(inputs=N_INPUTS,
                       hidden_size=[node for _ in range(layer)],
                       act_funcs=[tf.nn.leaky_relu for _ in range(layer)],
                       batch_size=BATCH_SIZE,
                       learning_rate=lr,
                       test_samples=test_samples,
                       dropout=False,
                       batch_norm=True)

            fd = 'nn_bn/{}l_{}n_{}lr/{}'.format(layer, node, lr, i)
            losses = train_model(model, train_samples, fd)
            loss_data[fd].append(losses)

        # save the losses to csv
        for fd in loss_data.keys():
            np.savetxt(os.path.join(fd, 'results.csv'),
                       np.asarray(loss_data[fd]),
                       delimiter=',')
Code Example #13
import chainer
from chainer.training import extensions

import numpy as np
import sys

from model import NN, CNN

# Parameter definitions:
# batch size is the number of samples grouped per update,
# and the epoch count is the number of training passes.
batch_size = 100
epoch_size = 3
modelname = "mnist_NN.h5"

# Define the model to train
model = NN()
if (len(sys.argv) > 1) and (sys.argv[1] == "CNN"):
    model = CNN()
    modelname = "mnist_CNN.h5"

# Load the dataset.
# Here we use MNIST, a dataset of handwritten digits,
# split into training and test data.
# Each entry of train/test holds two pieces of information:
# the image as a 1-D array (list) and its label (int).
# image size = 784 -> reshaped to 3 dimensions (1 x 28 x 28).
train, test = chainer.datasets.get_mnist(ndim=3)

# Create an iterator over the training data
# that slices it into batch-size chunks.
iterator = chainer.iterators.SerialIterator(train, batch_size)
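For reference, `SerialIterator.next()` returns a list of `batch_size` `(image, label)` tuples; `chainer.dataset.concat_examples` is the usual helper for stacking them into arrays. A minimal sketch of pulling one batch:

from chainer.dataset import concat_examples

batch = iterator.next()          # list of 100 (image, label) pairs
x, t = concat_examples(batch)    # x: (100, 1, 28, 28) float array, t: (100,) int array
print(x.shape, t.shape)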
Code Example #14
import gym
from model import NN
import tensorflow as tf
import numpy as np
import random
import math


env = gym.make('Breakout-v0')

nn = NN()
nn2 = NN()
init_op = tf.global_variables_initializer()  # initialize_all_variables() is deprecated
sess = tf.Session()
sess.run(init_op)

discount = 0.99
epsilon = 0.1  # exploration rate; assumed value (epsilon is used below but was never defined in the original)
full_reward = []
total_reward = 0


def choose_action(Q):
    action = np.argmax(Q)
    if epsilon > random.random():
        action = env.action_space.sample()
    return action

print(env.action_space)

for i_episode in range(1, 2000):
    observation = env.reset()
Code Example #15
File: main.py Project: hiive/deep-zertz
from mcts import MCTS
from zertz.ZertzGame import ZertzGame as Game
from model import NNetWrapper as NN
from config import Config, Config1, Config2
# Individual (the self-play trainer used in Option #1) is assumed to be
# imported from the project's training module; its import was not in the snippet.

if __name__ == '__main__':
    # Game settings
    rings = 19
    marbles = {'w': 10, 'g': 10, 'b': 10}
    win_con = [{'w': 2}, {'g': 2}, {'b': 2}, {'w': 1, 'g': 1, 'b': 1}]
    t = 5

    # Setup
    game = Game(rings, marbles, win_con, t)
    config = Config()
    nnet = NN(game, config)

    # Option #1: Learn
    trainer = Individual(game, nnet, config)
    trainer.learn()

    # Option #2: Human vs AI
    #nnet.load_checkpoint(filename='checkpoint_32_10_0001_29.pth.tar')
    #ai_agent = MCTS(game, nnet, config.c_puct, config.num_sims)
    #hp = HumanPlay(game, ai_agent)
    #hp.play()

    # Option #3: AI vs AI
    #config1 = Config1()
    #config2 = Config2()
    #nnet1 = NN(game, config1)
Code Example #16
File: train.py Project: lxlxlxlxlxlx/chatbot
    def __len__(self):
        return self.n_samples

# Hyperparameters
batch_size = 8
learning_rate = 0.001
num_epochs = 1000
hidden_size = 8
output_size = len(tags)
input_size = len(X_train[0])
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=2)

model = NN(input_size, hidden_size, output_size).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

start_t = time()
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)

        # forward pass
        outputs = model(words)
        loss = criterion(outputs, labels)

        # backward and optimizer step (the snippet was truncated here;
        # these are the standard steps with the Adam optimizer defined above)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
Code Example #17
File: train.py Project: GitMark0/wow-auction-tool
            print('Early stopping')
            break
        last_loss_test = loss_test
        print('epoch {}, loss - {}'.format(epoch, loss))
        print('test loss - {}'.format(loss_test))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


if __name__ == '__main__':
    data = pd.read_csv('datasets/anchor-weed.csv')
    scaler.fit_transform(data[['price']])

    np.random.seed(100)
    nr_classes = 6
    hidden_size = 12
    data = pd.read_csv('datasets/anchor-weed-prepared.csv')
    data = data.drop('Unnamed: 0', axis=1)
    y = data['price+1'].values
    X = data.drop('price+1', axis=1).values
    model = NN([nr_classes, 8, 4, 1])
    train(model, X, y, 30000, 0.01)
    torch.save(model.state_dict(), 'model_state/state')

    y_pred = model.forward(torch.from_numpy(X[275, :]).float())
    y_pred = np.exp(y_pred.detach().numpy().reshape(-1, 1))
    y_ = np.exp(y[275])

    plot_results(model, X, y)
Code Example #18
File: main.py Project: mikezhang95/EWC_extension
fisher_size = 1
loss_option = 0
fisher_true = False
fisher_block = False
fisher_diagonal = False

# experiment 1: CNN on permuted MNIST
learning_rate = 100
fisher_multiplier = 10
input_size = 50
output_size = 1
hidden_units = 25
# generate data
train_data,P = data.generate_data(num_tasks_test,samples_train,input_size)
test_data,_ = data.generate_data(num_tasks_test,samples_test,input_size)
# generate model
model = NN(input_size,output_size,hidden_units=hidden_units)


def main():
    
    experiment(P, fisher_multiplier, learning_rate, model,
               train_data, test_data, num_tasks_train, num_tasks_test,
               iterations, batch_size, log_period_updates, fisher_size,
               loss_option=loss_option, fisher_true=fisher_true,
               fisher_block=fisher_block, fisher_diagonal=fisher_diagonal,
               verbose=True)

    print(tf.trainable_variables())
    return


if __name__ == "__main__":
    main()

Code Example #19
                         batch_size=1,
                         shuffle=False)

nb_teams = next(iter(train_loader))[0].size(1) // 2

print(nb_teams)

training_loss_history = []
test_loss_history = []
test_accuracy_history = []

########## Train the model ##########

model = NN(nb_teams=nb_teams,
           learning_rate=CHOSEN_LEARNING_RATE,
           hidden_layer_size1=CHOSEN_HIDDEN_LAYER_SIZES[0],
           hidden_layer_size2=CHOSEN_HIDDEN_LAYER_SIZES[1],
           d_ratio=CHOSEN_DROPOUT_RATE)

best_loss = (1000, 0)
best_accuracy = (0, 0)
for epoch in tqdm(range(CHOSEN_EPOCH)):

    losses_training = []
    for data, target in train_loader:
        data = Variable(data)
        target = Variable(target)
        loss = model.step(data, target)
        losses_training.append(loss)

    # Mean of the losses of training predictions
Code Example #20
import torch as tc
from model import NN
from champion_dictionary import *

net = NN()
net.load_state_dict(tc.load("model.wab"))

input_data = tc.rand(10, dtype=tc.float)  # random 10-feature input for a smoke test
out = net(input_data)
print(input_data)

if out < 0.5:
    print("Blue team")
    print(out)

else:
    print("Red team")
    print(out)
Code Example #21
def transform_image(image_str):
    my_transforms = transforms.Compose([
        transforms.Resize((448, 448)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.9823], std=[0.0758]),
    ])
    cv2_image = data_uri_to_cv2_img(image_str)
    image = Image.fromarray(cv2_image)
    # image = Image.open(StringIO(image_bytes))
    return my_transforms(image).unsqueeze(0)


PATH = 'models/model_feature_predictor.ckpt'
model = NN.load_from_checkpoint(PATH)

pretrained_model = model_gan.model_gan
pretrained_model.load_state_dict(torch.load('models/model_sketch_simplification.pth'))

feature_map = {
    1: "Cross, Upper left corner, outside rectangle",
    2: "Large Rectangle",
    3: "Diagonal Cross",
    4: "Horizontal midline of 2",
    5: "Vertical Midline",
    6: "Small rectangle within 2 to the left",
    7: "Small segment above 6",
    8: "Four parallel lines within 2, upper left",
    9: "Triangle above 2, upper right",
    10: "Small vertical line within 2, below 9",
Code Example #22
File: train.py Project: AxelBremer/DL4NLP
def train(config):
    # Initialize the device on which to run the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Initialize the dataset and data loader (note the +1)
    dataset = IMDBDataset(train_or_test='train', seq_length=config.seq_length)
    data_loader = DataLoader(dataset,
                             config.batch_size,
                             shuffle=True,
                             num_workers=4)

    # Initialize the dataset and data loader (note the +1)
    test_dataset = IMDBDataset(train_or_test='test',
                               seq_length=config.seq_length)
    test_data_loader = DataLoader(test_dataset,
                                  config.batch_size,
                                  shuffle=True,
                                  num_workers=4)

    # Initialize the model that we are going to use

    if not (config.recurrent_dropout_model):
        model = NN(dataset.vocab_size, config.embed_dim, config.hidden_dim,
                   config.output_dim, config.n_layers, config.bidirectional,
                   config.dropout, 0).to(device)
    else:
        model = Model(dataset.vocab_size,
                      output_dim=config.output_dim).to(device)

    if not os.path.exists(f'runs/{config.name}'):
        os.makedirs(f'runs/{config.name}')

    print(config.__dict__)

    with open(f'runs/{config.name}/args.txt', 'w') as f:
        json.dump(config.__dict__, f, indent=2)

    # Setup the loss and optimizer
    criterion = nn.CrossEntropyLoss().to(device)
    # criterion = torch.nn.MSELoss().to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
    lowest = 100
    save = []
    epochs = 0

    while epochs < config.train_epochs:
        accuracies = []
        losses = []
        print('Training')
        for step, (batch_inputs, batch_targets) in enumerate(data_loader):

            x = batch_inputs.long().to(device)
            y_target = batch_targets.long().to(device)

            predictions = model(x)

            loss = criterion(predictions, y_target)
            optimizer.zero_grad()
            loss.backward()

            # torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=config.max_norm)
            optimizer.step()
            accuracy = (torch.argmax(predictions,
                                     dim=1) == y_target).cpu().numpy().mean()
            loss = loss.item()

            accuracies.append(accuracy)
            losses.append(loss)

        accuracy = np.array(accuracies).mean()
        loss = np.array(losses).mean()

        # Test on test set
        print('Testing')
        with torch.no_grad():
            test_accuracies = []
            test_losses = []
            for step, (batch_inputs,
                       batch_targets) in enumerate(test_data_loader):

                x = batch_inputs.long().to(device)
                y_target = batch_targets.long().to(device)

                predictions = model(x)

                test_loss = criterion(predictions, y_target)

                test_accuracy = (torch.argmax(
                    predictions, dim=1) == y_target).cpu().numpy().mean()
                test_loss = test_loss.item()

                test_accuracies.append(test_accuracy)
                test_losses.append(test_loss)

        test_accuracy = np.array(test_accuracies).mean()
        test_loss = np.array(test_losses).mean()

        if (test_loss < lowest):
            lowest = test_loss
            torch.save(model.state_dict(), f'runs/{config.name}/model.pt')

        epochs += 1
        print(
            "[{}] Train epochs {:04d}/{:04d}, Train Accuracy = {:.2f}, Train Loss = {:.3f}, Test Accuracy = {:.2f}, Test Loss = {:.3f}"
            .format(datetime.now().strftime("%Y-%m-%d %H:%M"), epochs,
                    config.train_epochs, accuracy, loss, test_accuracy,
                    test_loss))

    print('Done training.')
    return accuracy, lowest, save
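One caveat about the test loop above: it runs under `torch.no_grad()` but never calls `model.eval()`, so dropout stays active while measuring test accuracy. A hedged fix, assuming stochastic test-time behavior is not intended here:

model.eval()            # disable dropout for evaluation
with torch.no_grad():
    ...                 # test loop as above
model.train()           # restore training mode before the next epoch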
Code Example #23
# Import what we need
import chainer
from chainer import training, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer.training import extensions

import numpy as np
import sys
from PIL import Image, ImageOps

from model import NN, CNN

# Parameters
model = NN()
image_name = ""
CNN_flag = False
Invert = False

# Parse command-line arguments
def set_args():
    global CNN_flag, image_name, Invert

    if len(sys.argv) > 1:
        if sys.argv[1] == "CNN":
            CNN_flag = True

    if len(sys.argv) > 2:
        image_name = sys.argv[2]

    if len(sys.argv) > 3:
        Invert = True
Code Example #24
    def __getitem__(self, index):
        return self.x_train[index], self.y_train[index]

    def __len__(self):
        return len(self.x_train)


dataset = Data()
train_loader = DataLoader(dataset=dataset,
                          batch_size=32,
                          shuffle=True,
                          num_workers=0)

#Declaring network and optimizer
print("Initializing network")
network = NN()
optimizer = optim.Adam(network.parameters(), lr=0.0005)
criterion = nn.BCELoss(reduction='mean')
correct = 0

for epoch in range(5):
    for i, data in enumerate(train_loader, 0):
        # get the inputs
        inputs, labels = data

        # Forward pass: Compute predicted y by passing x to the model
        y_pred = network(inputs)

        # Compute and print loss
        loss = criterion(y_pred, labels.view((-1, 1)))
        print(f'Epoch {epoch + 1} | Batch: {i+1} | Loss: {loss.item():.4f}')

        # Backward pass and parameter update (the snippet was cut off here;
        # these are the standard steps with the optimizer defined above)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
Code Example #25
    dataset = TransitiveSentenceSimilarityDataset(option.dataset_file, glove)
    logging.info('Dataset loaded')

    embeddings = nn.Embedding(option.vocab_size, option.emb_dim, padding_idx=1)
    if option.model == 'NTN':
        model = NeuralTensorNetwork(embeddings, option.em_k)
    elif option.model == 'LowRankNTN':
        model = LowRankNeuralTensorNetwork(embeddings, option.em_k,
                                           option.em_r)
    elif option.model == 'RoleFactor':
        model = RoleFactoredTensorModel(embeddings, option.em_k)
    elif option.model == 'Predicate':
        model = PredicateTensorModel(embeddings)
    elif option.model == 'NN':
        model = NN(embeddings, 2 * option.em_k, option.em_k)
    elif option.model == 'EMC':
        model = EMC(embeddings, 2 * option.em_k, option.em_k)
    else:
        logging.info('Unknown model type: ' + option.model)
        exit(1)

    checkpoint = torch.load(option.model_file)
    if type(checkpoint) == dict:
        if 'event_model_state_dict' in checkpoint:
            state_dict = checkpoint['event_model_state_dict']
        else:
            state_dict = checkpoint['model_state_dict']
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)
Code Example #26
                                   batch_size=32,
                                   shuffle=True)

    classes = train.classes
    device = torch.device("cuda:0")
    load_weights = False

    # Use a pretrained ResNet-50 backbone when requested
    if use_pretrained_resnet:
        nn_model = models.resnet50(pretrained=True)
        num_ftr = nn_model.fc.in_features
        nn_model.fc = nn.Linear(num_ftr, 2)
        nn_model.to(device)

    else:
        nn_model = NN()

        if load_weights:
            print("Found State Dict. Loading...")
            with open(r'state/state_dict.pickle', 'rb') as file:
                state_dict = torch.load(file)  # read from the already-open handle
            nn_model.load_state_dict(state_dict)
        nn_model.to(device)

        learning_rate = 0.0001
        f1_scores = []
        epochs = 1
        for epoch in range(epochs):
            predicted_cumulated, labels_cumulated = np.array([]), np.array([])
            running_loss = 0
            counter = 0
Code Example #27
daily_return = reduce(lambda left, right: pd.concat([left, right], axis=1),
                      df_list)
daily_return = np.mean(daily_return, axis=1).fillna(0)
data = DataSplit(instance.unprep_data, daily_return, 20100101, 20180630,
                 20181231, 20190630, 7)


#######################################################################################
####  Model Framework  ################################################################
#######################################################################################
tot_mod = Model(data)  # -- PARENT CLASS of Models
benchmark_model = BenchPredict(data_model=data, lb=28,
                               la=1)  # -- BENCHMARK CLASS
NN_model = NN(data_model=data, lb=28, la=1)  # -- NEURAL NETWORK CLASS
RNN_model = RNN(data_model=data, lb=28, la=1)  # -- RNN CLASS
ARMA_data = DataSplit(instance.unprep_data, instance.unprep_data['return'],
                      20100101, 20180630, 20181231, 20190630,
                      7)  # -- ARMA DATA PROCESS
ARMA_model = Arma(data_model=ARMA_data)  # -- ARMA CLASS
Embed_model = combine_model(RNN_model, ARMA_model)


try:
    get_ipython().system('jupyter nbconvert --to python constants.ipynb')
except:
    pass

Code Example #28
File: test.py Project: AxelBremer/DL4NLP
def test(config):
    # Initialize the device on which to run the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    with open(f'runs/{config.name}/args.txt', 'r') as f:
        config.__dict__ = json.load(f)

    # Initialize the dataset and data loader (note the +1)
    test_dataset = IMDBDataset(train_or_test='test',
                               seq_length=config.seq_length)
    test_data_loader = DataLoader(test_dataset,
                                  batch_size=config.batch_size,
                                  shuffle=True,
                                  num_workers=4)

    # Initialize the model that we are going to use
    if not (config.recurrent_dropout_model):
        model = NN(test_dataset.vocab_size, config.embed_dim,
                   config.hidden_dim, config.output_dim, config.n_layers,
                   config.bidirectional, config.dropout, 0).to(device)
        model.load_state_dict(torch.load(f'runs/{config.name}/model.pt'))
    else:
        model = Model(test_dataset.vocab_size,
                      output_dim=config.output_dim).to(device)
        model.load_state_dict(torch.load(f'runs/{config.name}/model.pt'))

    # Setup the loss and optimizer
    criterion = torch.nn.MSELoss().to(device)
    lowest = 100
    save = []
    epochs = 0
    num_steps = math.floor(25000 / config.batch_size)

    d = {'target': [], 'mean0': [], 'std0': [], 'mean1': [], 'std1': []}

    with torch.no_grad():
        for step, (batch_inputs, batch_targets) in enumerate(test_data_loader):

            x = batch_inputs.long().to(device)
            y_target = batch_targets.long().to(device)

            # print('\n*************************************************\n')
            # print(test_dataset.convert_to_string(x[0].tolist()))

            # one row per stochastic forward pass; sized by config.B so the
            # mean/std below are not diluted by unused zero rows
            preds = torch.zeros((config.B, x.shape[0], config.output_dim))

            for i in range(config.B):
                preds[i, :, :] = model(x)

            # print('\n')

            print(step, '/', num_steps)  # progress

            mean = preds.mean(dim=0)
            std = preds.std(dim=0)

            d['target'].extend(batch_targets.tolist())
            d['mean0'].extend(mean[:, 0].tolist())
            d['std0'].extend(std[:, 0].tolist())
            d['mean1'].extend(mean[:, 1].tolist())
            d['std1'].extend(std[:, 1].tolist())

    pd.DataFrame(d).to_csv(f'runs/{config.name}/results.csv')
    return 'joe'
Code Example #29
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# NN is assumed to be imported from the project's own module;
# its import statement was not part of the original snippet.

df = pd.read_csv("breast-cancer-wisconsin.data",
                 index_col=0,
                 na_values=['?'],
                 header=None)
df.columns = [1, 2, 3, 4, 5, 6, 7, 8, 9, "Diagnosis"]
df.index.name = "ID"
df = df.fillna(df.mean())

X = df[[1, 2, 3, 4, 5, 6, 7, 8, 9]].to_numpy()
Y = df[["Diagnosis"]].to_numpy()  # - 2) / 2

nn_model = NN([9, 9, 1])  # 10, 10, 10, 10, 1])
nn_model.import_set(X, Y, normalization=True, ratio=[7, 2, 1])
theta, cost_hist = nn_model.train_model((3, 1), max_iter=10000)

training_score = nn_model.cost_function(theta, nn_model.sets['train'])
cv_score = nn_model.cost_function(theta, nn_model.sets['cv'])
test_score = nn_model.cost_function(theta, nn_model.sets['test'])

print(nn_model.f1_score(theta, nn_model.sets['train']))
print(nn_model.f1_score(theta, nn_model.sets['cv']))
print(nn_model.f1_score(theta, nn_model.sets['test']))

print("training cost:", training_score)
print("cross validation cost:", cv_score)
print("test cost:", test_score)
Code Example #30
import cv2
from model import NN
import numpy as np
import torch

cap = cv2.VideoCapture(0)
i = 0
classify = 1
labels = []
Model = NN(batch_size=1)
Model.load_state_dict(torch.load("1"))
Model.eval()
tardict = {1: 'Face Detected', 0: 'Undetected'}

while True:
    i += 1
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    gray = cv2.GaussianBlur(gray, (15, 15), 0)
    cv2.imshow('feed', frame)
    gray = torch.from_numpy(gray).view(1, 1, 480, 640).float()
    output = torch.round(Model(gray))
    output = output.item()
    print(tardict[output])
    if output != 0:
        input()  # pause until Enter is pressed when a face is detected
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()  # release the webcam and close the preview window
cv2.destroyAllWindows()