Example #1
 def __init__(self, hidden_size: int, embedding_size: int, 
              num_layers: int, bidirectional: bool):
     self.hidden_size = hidden_size
     self.embedding_size = embedding_size
     self.num_layers = num_layers
     self.bidirectional = bidirectional
     self.encoder = RNN(self.hidden_size, self.embedding_size, 
                        self.num_layers, self.bidirectional)
     self.decoder = RNN(self.hidden_size, self.embedding_size,
                        self.num_layers, self.bidirectional)
Example #2
 def btn_clk_train(self):
     if os.path.isfile('params.pkl'):
         os.remove('params.pkl')
     time.sleep(2)
     self.rnn = RNN()
     self.rnn.train()
     QMessageBox.information(self, "RNN", "train finished")
Example #3
def test(option="lstm", file_desc=""):
    if FLAGS.seq is None:
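        # No FLAGS.seq given: sample a binary sequence with FLAGS.val random ones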
        ones = np.random.choice(np.arange(FLAGS.seqlen),
                                FLAGS.val,
                                replace=False)
        seq = np.zeros(FLAGS.seqlen)
        seq[ones] = 1
    else:
        seq = np.array(FLAGS.seq).astype(np.float32)
    seq = np.expand_dims(seq, axis=1)
    sess = tf.Session()
    if option == "lstm":
        lstm = LSTM(sess, FLAGS.hidden, FLAGS.seqlen)
    elif option == "rnn":
        lstm = RNN(sess, FLAGS.hidden, FLAGS.seqlen)
    sess.run(tf.global_variables_initializer())
    print("\n\nLoading model/{}_lstm.pkl...".format(file_desc))
    with open("model/{}_lstm.pkl".format(file_desc), 'rb') as file:
        lstm_weights = pickle.load(file)
    print("\n\nLoading model/{}_dense.pkl...\n\n".format(file_desc))
    with open("model/{}_dense.pkl".format(file_desc), 'rb') as file:
        dense_weights = pickle.load(file)
    lstm.load_weights(lstm_weights, dense_weights)
    print(seq.reshape(-1))
    predictions = lstm.test(seq)
    print(np.argmax(predictions))
Example #4
    def test_networks(self):
        """
        Test the networks saved by run(). Save to json
        """
        data = self.__get_test_data()
        result = {}
        for (min_speakers, max_speakers) in [[1, 10], [1, 20]]:
            result_for_trainset = {}
            for feature_type in self.feature_options:
                result_for_feature = {}
                # Load best performing model
                network = RNN()
                name = f'./trained_networks_with_augmentation/rnn_train_{min_speakers}_{max_speakers}/{feature_type}'
                network.load_from_file(name)

                # Test performance
                for test_name, test_data_current in data.items():
                    x, y = test_data_current['x'], test_data_current['y']
                    result_for_feature[test_name] = self.__test_net(
                        network, x, y, feature_type)

                result_for_trainset[feature_type] = result_for_feature
            result[f'train_{min_speakers}_{max_speakers}'] = result_for_trainset
        with open('experiment_networks_tested.json', 'w+') as fp:
            json.dump(result, fp)
        return result
Example #5
def train(vocab_size, state_size, bptt_truncate, model_path, data_path,
          num_epochs, learning_rate, model_dir):
    # create an RNN, if possible load pre-existing model parameters
    if model_path:
        model_parameters = load_model_parameters(model_path)
        model = RNN(vocab_size, state_size, bptt_truncate, model_parameters)
    else:
        model = RNN(vocab_size, state_size, bptt_truncate)

    # construct datasets
    training_data, validation_data, test_data = \
        parse_reddit_data(vocab_size, data_path)

    # train the model
    model.sgd(training_data, num_epochs, learning_rate, validation_data,
              test_data, model_dir)
Example #6
def main():

    patterns = loadData('pict.dat')  # Pattern 1-11
    patterns_1_3 = [patterns[index, :].reshape(1, 1024) for index in range(3)]
    patterns_4_11 = [
        patterns[3 + index, :].reshape(1, 1024) for index in range(8)
    ]

    network = RNN(size=1024, sequential=False, random=False)
    network.init_weights(patterns_1_3)
    noises = np.arange(0, 100, 5)
    averages = 1000
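    # Average recall accuracy over 1000 distorted copies per noise level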
    for i, pattern in enumerate(patterns_1_3):
        OGpattern = pattern.copy()
        nCorrect = np.zeros((noises.shape[0], 1))
        for k, noise in enumerate(noises):
            for j in range(averages):
                patternD = distort(OGpattern, noise)
                x_output = network.train(patternD)
                nCorrect[k][0] += ((np.count_nonzero(x_output == OGpattern)) /
                                   patternD.shape[1]) * 100

        nCorrect = nCorrect / averages
        plt.plot(noises, nCorrect, label=("Pattern " + str(i + 1)))

    plt.legend()
    plt.show()
Example #7
    def __init__(self, embed_mat, opt):
        super(Boost, self).__init__()

        # self.model1 = model1 = TextCNN1(embed_mat, opt)
        # self.model1 = load_model(model1, model_dir=opt['model_dir'], model_name='TextCNN1', name="layer_5_finetune_epoch_6_2017-08-15#15:22:03.params")
        # self.model2 = model2 = TextCNN1(embed_mat, opt)
        # self.model2 = load_model(model2, model_dir=opt['model_dir'], model_name='TextCNN1', name="layer_2_epoch_5_2017-08-02#11:25:22_0.4095.params")
        # self.model3 = model3 = TextCNN1(embed_mat, opt)
        # self.model3 = load_model(model3, model_dir=opt['model_dir'], model_name='TextCNN1', name="layer_3_finetune_epoch_6_2017-08-14#04:07:52.params")
        # self.model4 = model4 = TextCNN1(embed_mat, opt)
        # self.model4 = load_model(model4, model_dir=opt['model_dir'], model_name='TextCNN1', name="layer_4_finetune_epoch_6_2017-08-14#07:28:16.params")
        #self.model5 = model5 = TextCNN(embed_mat, opt)
        #self.model5 = load_model(model5, model_dir=opt['model_dir'], model_name='TextCNN', name="layer_5_epoch_5_2017-08-12#19:10:02_0.4102.params")
        # self.model6 = model6 = TextCNN(embed_mat, opt)
        # self.model6 = load_model(model6, model_dir=opt['model_dir'], model_name='TextCNN', name="layer_6_finetune_top1_char_epoch_6_2017-08-13#01:16:15.params")
        # self.model7 = model7 = TextCNN(embed_mat, opt)
        # self.model7 = load_model(model7, model_dir=opt['model_dir'], model_name='TextCNN', name="layer_7_finetune_top1_char_epoch_6_2017-08-13#02:52:58.params")
        # self.model8 = model8 = TextCNN(embed_mat, opt)
        # self.model8 = load_model(model8, model_dir=opt['model_dir'], model_name='TextCNN', name="layer_8_finetune_top1_char_epoch_6_2017-08-13#04:29:34.params")
        # self.model9 = model9 = TextCNN(embed_mat, opt)
        # self.model9 = load_model(model9, model_dir=opt['model_dir'], model_name='TextCNN', name="layer_9_finetune_top1_char_epoch_6_2017-08-13#10:34:04.params")
        # self.model10 = model10 = TextCNN(embed_mat, opt)
        # self.model10 = load_model(model10, model_dir=opt['model_dir'], model_name='TextCNN', name="layer_10_finetune_top1_char_epoch_6_2017-08-13#12:11:21.params")

        self.model1 = model1 = RNN(embed_mat, opt)
        self.model1 = load_model(
            model1,
            model_dir=opt['model_dir'],
            model_name='RNN',
            name="layer_1_char_epoch_6_2017-08-15#15:27:18.params")
def train_and_test_network():
    """
    Train a neural network and test it. Can also train on other feature types,
    or run the experimenter to run different configurations
    """
    min_speakers = 1
    max_speakers = 10

    # Load data from filesystem
    data_loader = DataLoader(train_dir, test_src_dr, test_dest_dir)
    data_loader.force_recreate = False
    data_loader.min_speakers = min_speakers
    data_loader.max_speakers = max_speakers

    # Load train and test data
    train, (test_x, test_y) = data_loader.load_data()
    libri_x, libri_y = data_loader.load_libricount(libri_dir)

    # Train and test network
    file = 'testing_rnn'
    net = RNN()
    net.save_to_file(file)
    net.train(train, min_speakers, max_speakers, FEATURE_TYPE)

    net.load_from_file(file)

    timit_results = net.test(test_x, test_y, FEATURE_TYPE)
    libri_results = net.test(libri_x, libri_y, FEATURE_TYPE)
Example #9
def train(train_id_data, num_vocabs, num_target_class):

    max_epoch = 200
    model_dir = "E:\Pycharm Project\FYP\RNN\Trained_models\save_models.ckpt"
    hps = RNN.get_default_hparams()
    hps.update(batch_size=150,
               num_steps=120,
               emb_size=100,
               enc_dim=150,
               vocab_size=num_vocabs + 1,
               num_target_class=num_target_class)

    with tf.variable_scope("model"):
        model = RNN(hps, "train")

    sv = tf.train.Supervisor(is_chief=True,
                             logdir=model_dir,
                             summary_op=None,
                             global_step=model.global_step)

    # allow TF to place ops on CPU when no GPU kernel is available
    tf_config = tf.ConfigProto(allow_soft_placement=True)

    with sv.managed_session(config=tf_config) as sess:
        local_step = 0
        prev_global_step = sess.run(model.global_step)

        train_data_set = SentimentDataset(train_id_data, hps.batch_size,
                                          hps.num_steps)
        losses = []

        while not sv.should_stop():
            fetches = [model.global_step, model.loss, model.train_op]
            a_batch_data = next(train_data_set.iterator)
            y, x, w = a_batch_data
            fetched = sess.run(fetches, {
                model.x: x,
                model.y: y,
                model.w: w,
                model.keep_prob: hps.keep_prob
            })

            local_step += 1
            _global_step = fetched[0]
            _loss = fetched[1]
            losses.append(_loss)
            if local_step < 10 or local_step % 10 == 0:
                epoch = train_data_set.get_epoch_num()
                print("Epoch = {:3d} Step = {:7d} loss = {:5.3f}".format(
                    epoch, _global_step, np.mean(losses)))
                losses = []  # reset the running-mean loss window
                if epoch >= max_epoch:
                    break

        print("Training is done.")
    sv.stop()

    # model.out_pred, model.out_probs
    freeze_graph(
        model_dir, "model/out_pred,model/out_probs",
        "Final_graph.tf.pb")  ## freeze graph with params to probobuf format
Example #10
    def __init__(self, embed_mat, opt):
        super(Boost_RNN1_char, self).__init__()

        self.model1 = model1 = RNN(embed_mat, opt)
        self.model1 = load_model(
            model1,
            model_dir=opt['model_dir'],
            model_name='RNN',
            name="layer_1_finetune_char_epoch_6_2017-08-15#15:27:18")
def translate():
    data = LanguageLoader(en_path, fr_path, vocab_size, max_length)
    model = RNN(data.input_size, data.output_size)
    model.load_state_dict(torch.load('models/baseline.module'))
    vecs = data.sentence_to_vec("Madam  president<EOS>")
    print("in translate-- ",vecs)
    translation = model.eval(vecs)
    print("final result ",data.vec_to_sentence(translation))
Example #12
 def __init__(self, TRAIN_CONFIGS, GRU_CONFIGS, FFN_CONFIGS=None):
     self.TRAIN_CONFIGS = TRAIN_CONFIGS
     self.GRU_CONFIGS = self._process_gru_configs(GRU_CONFIGS)
     self.model = RNN(target=TRAIN_CONFIGS['target'],
                      **self.GRU_CONFIGS,
                      FFN_CONFIGS=FFN_CONFIGS)
     self.epochs_trained = 0
     self.trained = False
     # Storage for later
     self.loss = self.val_loss = None
     self.train_y_hat = self.train_y_true = None
     self.val_y_hat = self.val_y_true = None
Example #13
def compare_gradients():
    tRNN = RNN(K, m, eta, seq_length, init='normal')
    for X_chars, Y_chars in get_batch():
        num_grads = numerical_gradients(tRNN, X_chars, Y_chars, h)
        tRNN.train(X_chars, Y_chars, clip=False)
        for k in tRNN.weights:
            error = relative_error(tRNN.gradients[k], num_grads[k])
            print("\n%s error:" % k)
            print(error)
        exit()  # check gradients on the first batch only
Example #14
def getModel(tokenized_sentences, word_to_index):
    x_train = get_x_train(tokenized_sentences, word_to_index)
    y_train = get_y_train(tokenized_sentences, word_to_index)

    model = RNN(_VOCABULARY_SIZE, hidden_dim=_HIDDEN_DIM)
    train_with_sgd(model,
                   x_train,
                   y_train,
                   nepoch=_NEPOCH,
                   learning_rate=_LEARNING_RATE)
    return model
Example #15
    def __init__(self, embed_mat, opt):
        super(Emsemble, self).__init__()

        self.model1 = model1 = RNN(embed_mat, opt)
        self.model1 = load_model(model1,
                                 model_dir=opt['model_dir'],
                                 model_name='RNN')
        self.model2 = model2 = TextCNN(embed_mat, opt)
        self.model2 = load_model(model2,
                                 model_dir=opt['model_dir'],
                                 model_name='TextCNN')
Example #16
def select_network(net_type, inp_size, hid_size, nonlin, rinit, iinit, cuda,
                   lastk, rsize):
    if net_type == 'RNN':
        rnn = RNN(inp_size, hid_size, nonlin, bias=True, cuda=cuda,
                  r_initializer=rinit, i_initializer=iinit)
    elif net_type == 'MemRNN':
        rnn = MemRNN(inp_size, hid_size, nonlin, bias=True, cuda=cuda,
                     r_initializer=rinit, i_initializer=iinit)
    elif net_type == 'RelMemRNN':
        rnn = RelMemRNN(inp_size, hid_size, lastk, rsize, nonlin, bias=True,
                        cuda=cuda, r_initializer=rinit, i_initializer=iinit)
    elif net_type == 'LSTM':
        rnn = LSTM(inp_size, hid_size, cuda)
    elif net_type == 'RelLSTM':
        rnn = RelLSTM(inp_size, hid_size, lastk, rsize, cuda)
    else:
        raise ValueError('unknown net_type: {}'.format(net_type))
    return rnn
Example #17
def main(_):
    check_dir()
    print_config()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    run_option = tf.ConfigProto(gpu_options=gpu_options)
    with tf.Session(config=run_option) as sess:
        rnn = RNN(config=FLAGS, sess=sess)
        rnn.build_model()
        if FLAGS.is_training:
            rnn.train_model()
        if FLAGS.is_testing:
            rnn.test_model()
Example #18
 def MyRNN_H256(self, data, test_set=None):
     input_sizes, output_size, train_set, valid_set = data
     model = nn.Sequential(
         Squeeze,
         RNN(input_sizes[0], output_size, hidden_size=256, cuda=True))
     network = ANN("MyRNN_H256", model, cuda=True)
     network.train(train_set,
                   epochs=60,
                   batch_size=20,
                   criterion=nn.NLLLoss(),
                   optimizer=optim.Adam(model.parameters(), lr=0.01),
                   valid_set=valid_set)
     return network
Example #19
 def fitness(self):
     uow = UnitOfWork()
     genoWithSegSiz = [
         geno for geno in self._genomes if geno._genName == 'segment_size'
     ]
     if not genoWithSegSiz:
         self._shelveDataFile = uow._dataSet().PreparingData()
     else:
         segment_size = genoWithSegSiz[0]._value
         self._shelveDataFile = uow._dataSet.PreparingData(segment_size)
     rnn = RNN(self._shelveDataFile, self._genomes)
     self._accuracy = rnn.RunAndAccuracy()
     return self._accuracy
Example #20
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    int_to_vocab, vocab_to_int, n_vocab, in_text, out_text = get_data_from_file(
        values.train_file, values.batch_size, values.seq_size)

    net = RNN(n_vocab, values.embedding_size, values.lstm_size)
    net = net.to(device)

    criterion, optimizer = get_loss_and_train_op(net, 0.001)

    net = train(net, criterion, optimizer, n_vocab, in_text, out_text,
                vocab_to_int, int_to_vocab, device)

    torch.save(net, '/data/myNet.pt')
Example #21
 def __train_net(self, files: np.ndarray, min_speakers: int,
                 max_speakers: int, feature_type: str, save_to: str):
     """
     Train a network
     :param files: The train files
     :param min_speakers: The min number of speakers to generate files for
     :param max_speakers: The max number of speakers to generate files for
     :param feature_type: The feature type to use
      :param save_to: Location to save the best performing model to
     :return: RNN, history
     """
     network = RNN()
     network.save_to_file(save_to)
     _, history = network.train(files, min_speakers, max_speakers,
                                feature_type)
     return network, history
Example #22
def testSystem():
    t = Tokenizer()

    xTrain, yTrain = t.getData()
    np.random.seed(10)
    model = RNN(15000)
    o, s = model.forwardPropagation(xTrain[30])
    predictions = model.predict(xTrain[30])
    print(o.shape)
    print(o)
    print(predictions.shape)
    print(predictions)

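    # Random weights give near-uniform predictions, so loss should be about ln(vocab)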
    print("Expected Loss: \n" + str(np.log(model.vocab)))
    print("Actual Loss:")
    print(model.calculateLoss(xTrain[:100], yTrain[:100]))
Example #23
def train(option="lstm", file_desc=""):
    epochs = FLAGS.epochs
    batchsize = FLAGS.batchsize
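    # Two RNGs with the same seed shuffle x and y in the same order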
    shuffle_x = np.random.RandomState(42)
    shuffle_y = np.random.RandomState(42)

    task = CountingGame2()
    x, y = task.generate(length=FLAGS.seqlen, samples=FLAGS.samples)
    test_x, test_y = task.generate(length=FLAGS.seqlen, samples=1)

    sess = tf.Session()
    if option == "lstm":
        lstm = LSTM(sess, FLAGS.hidden, FLAGS.seqlen)
    elif option == "rnn":
        lstm = RNN(sess, FLAGS.hidden, FLAGS.seqlen)

    sess.run(tf.global_variables_initializer())

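    # Read the initial weights out of the graph; updates happen in Python below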
    lstm_weights = sess.run(lstm.cells[0].lstm_weights)
    lstm.load_weights(lstm_weights)

    n_iters = len(x) // batchsize
    for i in np.arange(epochs):
        shuffle_x.shuffle(x)
        shuffle_y.shuffle(y)
        for j in np.arange(n_iters):
            start = int(j * batchsize)
            end = int(start + batchsize)
            loss, lstm_gradients = lstm.fit(x[start:end], y[start:end])
            lstm_gradients = utils.average_gradients(lstm_gradients)
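            # Manual SGD step on the Python-side copy of the weights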
            lstm_weights = [
                w - FLAGS.lr * grad
                for w, grad in zip(lstm_weights, lstm_gradients)
            ]
            dense_weights = sess.run(lstm.dense_weights)
            lstm.load_weights(lstm_weights)
        if i % 5 == 0:
            print("\nEpoch #{} Loss: {}".format(i, loss))
            print(test_x[0])
            predictions = lstm.test(test_x[0])
            print(np.argmax(predictions))
            with open("model/{}_lstm.pkl".format(file_desc), 'wb') as file:
                pickle.dump(lstm_weights, file)
            with open("model/{}_dense.pkl".format(file_desc), 'wb') as file:
                pickle.dump(dense_weights, file)
Example #24
def main():

    patterns = loadData('pict.dat')  # Pattern 1-11
    patterns_1_3 = [patterns[index, :].reshape(1, 1024) for index in range(3)]
    patterns_4_11 = [
        patterns[3 + index, :].reshape(1, 1024) for index in range(8)
    ]

    network = RNN(size=1024, sequential=False, random=True)
    network.init_weights(patterns_1_3)

    for index, pattern in enumerate(patterns_1_3):
        energy = network.layapunovFunction(pattern)
        print('Energy for pattern {}: {}'.format(index, energy))

    for index, pattern in enumerate(patterns_4_11):
        energy = network.layapunovFunction(pattern)
        print('Energy for distorted pattern {}: {}'.format(3 + index, energy))
Example #25
def main():
    
    rnn = RNN(data.input_size, data.output_size)

    losses = []
    for epoch in range(num_epochs):
        print("=" * 50 + ("  EPOCH %i  " % epoch) + "=" * 50)
        for i, batch in enumerate(data.sentences(num_batches)):
            input, target = batch
            #print(target)
            loss, outputs = rnn.train(input, target.copy())
            losses.append(loss)

            if i % 100 == 0:
                print("Loss at step %d: %.2f" % (i, loss))
                print("Truth: \"%s\"" % data.vec_to_sentence(target))
                print("Guess: \"%s\"\n" % data.vec_to_sentence(outputs[:-1]))
                rnn.save()
    torch.save(rnn.state_dict(), "models/baseline.module")
Example #26
def main(args):
    new_model = args.new_model

    rnn = RNN()

    if not new_model:
        try:
            rnn.set_weights(config.rnn_weight)
        except Exception:
            print("Either set --new_model or ensure {} exists".format(
                config.rnn_weight))
            raise

    rnn_input = []
    rnn_output = []
    for i in range(130):
        # print('Building {}th...'.format(i))
        input = np.load('./rnn_data/rnn_input_' + str(i) + '.npy')
        output = np.load('./rnn_data/rnn_output_' + str(i) + '.npy')
        # sequence pre-processing, for training LSTM the rnn_input must be (samples/episodes, time steps, features)
        input = pad_sequences(input,
                              maxlen=40,
                              dtype='float32',
                              padding='post',
                              truncating='post')
        output = pad_sequences(output,
                               maxlen=40,
                               dtype='float32',
                               padding='post',
                               truncating='post')
        rnn_input.append(input)
        rnn_output.append(output)

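    # Stack the padded episodes into single input/output arrays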
    input = rnn_input[0]
    output = rnn_output[0]
    for i in range(len(rnn_input) - 1):
        input = np.concatenate((input, rnn_input[i + 1]), axis=0)
        output = np.concatenate((output, rnn_output[i + 1]), axis=0)
        print(input.shape)
        print(output.shape)

    rnn.train(input, output)
    rnn.plot_loss()
Example #27
def run():
    l, V = pl.load_words()
    # `l` is list of sentences split into words
    # `V` is a dict() mapping word with index in vocabulary

    # Convert words to respective indices
    for i in range(len(l)):
        for j in range(len(l[i])):
            l[i][j] = V[l[i][j]]

    # Generate training data
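    # Each pair (sent[:-1], sent[1:]) trains next-word prediction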
    training_data = []
    for sent in l:
        training_data.append((sent[:-1], sent[1:]))
    """ Initializing RNN with hidden state of dimension 20x1 """
    rnet = RNN(20, len(V))
    rnet.train(training_data[:25],
               learning_rate=3.0,
               bptt_step=10,
               transform=lambda sent: [pl.one_hot(len(V), x) for x in sent])
Example #28
def main():

    patterns = loadData('pict.dat')  # Pattern 1-11
    patterns_1_3 = [patterns[index, :].reshape(1, 1024) for index in range(3)]
    patterns_10_11 = [
        patterns[9 + index, :].reshape(1, 1024) for index in range(2)
    ]

    network = RNN(size=1024, sequential=True, random=True)
    network.init_weights(patterns_1_3)

    # Testing if stable
    print('\nTesting if stable: ')
    plt.figure('Attractors - patterns')
    index = 0
    for pattern in patterns_1_3:
        index += 1
        plt.subplot(1, 3, index)
        plt.title('Pattern: {}'.format(index))
        x_output = network.train(pattern)
        print('Number of correct: {}/{} '.format(
            np.count_nonzero(x_output == pattern), pattern.shape[1]))
        plt.imshow(pattern.reshape(32, 32), cmap='gray')

    # Testing for distorted patterns 10 and 11
    print('\nTesting for distorted patterns: ')
    index = 10
    for pattern in patterns_10_11:
        plt.figure('Output - pattern: {}'.format(index))
        index += 1
        x_output = network.train(pattern)
        for true_pattern in patterns_1_3:
            print('Number of correct: {}/{} '.format(
                np.count_nonzero(x_output == true_pattern),
                true_pattern.shape[1]))
        plt.imshow(x_output.reshape(32, 32), cmap='gray')
    plt.show()
Example #29
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.trayIcon = QSystemTrayIcon(self)
        self.trayIcon.setIcon(QIcon('ui/icon.png'))
        self.trayIcon.activated.connect(self.restore_window)

        self.WM = WindowManager()
        self.pre_window = self.WM.get_fore_window()
        self.rnn = RNN()
        self.runState = False

        self.startButton.clicked.connect(self.btn_clk_start)
        self.startState = True
        self.trainButton.clicked.connect(self.btn_clk_train)
        self.helpButton.clicked.connect(self.btn_clk_help)
        self.helpState = True

        self.timer = QTimer(self)
        self.timer.start(200)
        self.timer.timeout.connect(self.run)
Example #30
def load_model(path,
               hyper,
               inference=True,
               dictionary_path=args.dictionary_path,
               LSTM=False):
    assert os.path.exists(
        path), 'directory for model {} could not be found'.format(path)
    voc_2_index, _, writer = load_dictionaries(dictionary_path)
    model = RNN(hyper['--embed_size'],
                hyper['--hidden_size'],
                len(voc_2_index),
                hyper['--num_layers'],
                add_writer=hyper['--writer_codes'],
                writer_number=len(writer),
                writer_embed_size=hyper['--writers_embeddings'],
                add_writer_as_hidden=hyper['--initialise_hidden'],
                LSTM=LSTM)
    #    lod = torch.load(os.path.join(path,'model.pt'))
    model.load_state_dict(torch.load(os.path.join(path, 'model.pt')))
    if inference:
        model.eval()
    return model