Code example #1
def train():
    logger.info("loading training data --->")
    with open(path_nlp_data, 'r', encoding="utf-8-sig") as f:
        reader = csv.reader(f)
        training_data = [(remove_stopwords(row[0]), row[1]) for row in reader]
    # NER tags extracted using BIO tags
    list_of_nnp, list_of_day, list_of_rt, list_of_dayperiod = ner_tagging(
        path_ner_data)
    # Training using NB + CV
    vectorizer, clf, le = training(training_data, list_of_nnp, list_of_day,
                                   list_of_dayperiod)
    # Saving models and data
    logger.info("trained NB on training data and saving models --->")
    pickle_dump(clf, model_NB)
    pickle_dump(le, label_enco)
    pickle_dump(vectorizer, semhash_feature)
    pickle_dump(list_of_nnp, nnp)
    pickle_dump(list_of_day, day)
    pickle_dump(list_of_rt, rt)
    pickle_dump(list_of_dayperiod, dayperiod)
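The pickle_dump helper and the path constants (model_NB, label_enco, semhash_feature, nnp, day, rt, dayperiod) are defined elsewhere in the project. A minimal sketch of what such a helper might look like, assuming it simply wraps pickle.dump:

import pickle

def pickle_dump(obj, path):
    # Hypothetical helper (not shown in the snippet): serialize obj to path.
    with open(path, "wb") as f:
        pickle.dump(obj, f)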
Code example #2
File: neural_net.py  Project: hduongtrong/ScikitFlow
 def fit(self, data_function):
     # Need to add support for fit(X, Y)
     # Also support for out of memory data
     PrintMessage()
     with tf.Graph().as_default():
         n, input_dim = data_function.train.images.shape
         output_dim = data_function.validation.labels.shape[1]
         images_placeholder = tf.placeholder(tf.float32, 
                 shape=(None, input_dim))
         labels_placeholder = tf.placeholder(tf.float32, 
                 shape=(None, output_dim))
         logits = _construct_nn3(images_placeholder, [input_dim, 
                 self.hidden1_dim, self.hidden2_dim, output_dim])
         loss = loss_dict[self.loss](logits, labels_placeholder)
         score = score_dict[self.loss](logits, labels_placeholder)
         train_op = training(loss)
         summary_op = tf.merge_all_summaries()
         saver = tf.train.Saver()
         sess = tf.Session()
         init = tf.initialize_all_variables()
         sess.run(init)
         summary_writer = tf.train.SummaryWriter("./MNIST_data/",
                 graph_def = sess.graph_def)
         for step in xrange( self.n_epoch * n / self.batch_size):
             batch_xs, batch_ys = data_function.train.next_batch(
                     self.batch_size)
             feed_dict = {images_placeholder: batch_xs,
                          labels_placeholder: batch_ys}
             _, loss_value = sess.run([train_op, loss], 
                                      feed_dict = feed_dict)
             if step % (n / self.batch_size) == 0:
                 summary_str = sess.run(summary_op, feed_dict=feed_dict)
                 summary_writer.add_summary(summary_str, step)
                 feed_dict = {images_placeholder: 
                                     data_function.validation.images,
                              labels_placeholder: 
                                     data_function.validation.labels}
                 valid_loss, valid_score = sess.run([loss, score],
                         feed_dict = feed_dict) 
                 PrintMessage(data_function.train.epochs_completed,
                         loss_value, valid_loss, valid_score)
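This snippet targets Python 2 and a pre-1.0 TensorFlow graph API. A minimal sketch of the equivalent setup lines under TensorFlow 1.x, assuming the rest of the method stays unchanged:

# TensorFlow 1.x replacements for the pre-1.0 calls used above (graph mode).
summary_op = tf.summary.merge_all()           # was tf.merge_all_summaries()
init = tf.global_variables_initializer()      # was tf.initialize_all_variables()
summary_writer = tf.summary.FileWriter("./MNIST_data/", graph=sess.graph)  # was tf.train.SummaryWriter(..., graph_def=...)
# The loop bound would use Python 3 integer division:
# for step in range(self.n_epoch * n // self.batch_size):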
Code example #3
File: build_problem.py  Project: yu54ku/xml-cnn
    def run(self, trial=None):
        params = self.params
        is_ps = params["params_search"]
        term_size = shutil.get_terminal_size().columns

        # Show Hyper Params
        if trial is not None:
            sequence_length = params["sequence_length"]
            hyper_params = get_hyper_params(trial, sequence_length)
            self.params.update(hyper_params)
            if trial.number > 0:
                print("\n")
            out_str = " Trial: {} ".format(trial.number + 1)
            print(out_str.center(term_size, "="))
            print("\n" + " Current Hyper Params ".center(term_size, "-"))
            print([i for i in sorted(hyper_params.items())])
            print("-" * shutil.get_terminal_size().columns + "\n")

        # Generate Batch Iterators
        train_loader = data.Iterator(
            self.train,
            batch_size=params["batch_size"],
            device=params["device"],
            train=True,
        )

        valid_loader = data.Iterator(
            self.valid,
            batch_size=params["batch_size"],
            device=params["device"],
            train=False,
            sort=False,
        )

        if not is_ps:
            test_loader = data.Iterator(
                self.test,
                batch_size=params["batch_size"],
                device=params["device"],
                train=False,
                sort=False,
            )

        # Calc Batch Size
        params["train_batch_total"] = math.ceil(
            len(self.train) / params["batch_size"])

        params["valid_batch_total"] = math.ceil(
            len(self.valid) / params["batch_size"])

        if not is_ps:
            params["test_batch_total"] = math.ceil(
                len(self.test) / params["batch_size"])

        # Define xml-cnn model
        model = xml_cnn(params, self.TEXT.vocab.vectors)
        model = model.to(params["device"])
        epochs = params["epochs"]
        learning_rate = params["learning_rate"]

        # Define Optimizer
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)

        if not is_ps:
            ms = [int(epochs * 0.5), int(epochs * 0.75)]
            scheduler = MultiStepLR(optimizer, milestones=ms, gamma=0.1)

        best_epoch = 1
        num_of_unchanged = 1

        measure = params["measure"]
        measure = measure[:-3] if "f1" in measure else measure
        if not is_ps:
            save_best_model_path = params["model_cache_path"] + "best_model.pkl"
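        # save_best_model_path exists only outside hyper-parameter search mode,
        # hence the "if not is_ps" guards around torch.save() below.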
        # Training loop
        for epoch in range(1, epochs + 1):
            if self.params["params_search"]:
                out_str = " Epoch: {} ".format(epoch)
            else:
                lr = scheduler.get_last_lr()[0]
                term_size = shutil.get_terminal_size().columns
                out_str = " Epoch: {} (lr={:.20f}) ".format(epoch, lr)
            # out_str = " Epoch: {} ".format(epoch)
            print(out_str.center(term_size, "-"))

            # Training
            training(params, model, train_loader, optimizer)

            # Validation
            val_measure_epoch_i = validating_testing(params, model,
                                                     valid_loader)

            # Record and save the best model
            if epoch < 2:
                best_val_measure = val_measure_epoch_i
                if not is_ps:
                    torch.save(model, save_best_model_path)
            elif best_val_measure < val_measure_epoch_i:
                best_epoch = epoch
                best_val_measure = val_measure_epoch_i
                num_of_unchanged = 1
                if not is_ps:
                    torch.save(model, save_best_model_path)
            else:
                num_of_unchanged += 1

            # Show Best Epoch
            out_str = " Best Epoch: {} (" + measure + ": {:.10f}, "
            out_str = out_str.format(best_epoch, best_val_measure)
            if bool(params["early_stopping"]):
                remaining = params["early_stopping"] - num_of_unchanged
                out_str += "ES Remaining: {}) "
                out_str = out_str.format(remaining)
            else:
                out_str += "ES: False) "
            print("\n" + out_str.center(term_size, "-") + "\n")

            # Early Stopping
            if early_stopping(num_of_unchanged, params["early_stopping"]):
                break

            if not is_ps:
                scheduler.step()

        if is_ps:
            # Show Best Trials
            if self.best_trial_measure < best_val_measure:
                self.best_trial_measure = best_val_measure
                self.num_of_trial = trial.number + 1
            out_str = " Best Trial: {} (" + measure + ": {:.20f}) "
            out_str = out_str.format(self.num_of_trial,
                                     self.best_trial_measure)
            print(out_str.center(term_size, "="))
        else:
            # Testing on Best Epoch Model
            model = torch.load(save_best_model_path)
            test_measure = validating_testing(params,
                                              model,
                                              test_loader,
                                              is_valid=False)
            out_str = " Finished "
            print("\n\n" + out_str.center(term_size, "=") + "\n")

            out_str = " Best Epoch: {} (" + measure + ": {:.20f}) "
            out_str = out_str.format(best_epoch, test_measure)
            print("\n" + out_str.center(term_size, "-") + "\n")

        return 1 - best_val_measure
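run accepts an Optuna trial and returns 1 - best_val_measure, so it appears to be used directly as an Optuna objective during hyper-parameter search. A minimal sketch of how it might be driven, assuming an already-constructed instance named problem (hypothetical name):

import optuna

# Hypothetical driver: minimizing (1 - validation measure) maximizes the measure.
study = optuna.create_study(direction="minimize")
study.optimize(problem.run, n_trials=50)
print(study.best_params)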
Code example #4
File: main.py  Project: jocelyn1981/Co-GAT
if args.data_dir == "dataset/mastodon":
    metric = False
else:
    metric = True
if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)

dev_best_sent, dev_best_act = 0.0, 0.0
test_sent_sent, test_sent_act = 0.0, 0.0
test_act_sent, test_act_act = 0.0, 0.0

for epoch in range(0, args.num_epoch + 1):
    print("Training Epoch: {:4d} ...".format(epoch), file=sys.stderr)

    train_loss, train_time = training(
        model, data_house.get_iterator("train", args.batch_size, True), 10.0,
        args.bert_learning_rate, args.pretrained_model)
    print("[Epoch{:4d}], train loss is {:.4f}, cost {:.4f} s.".format(
        epoch, train_loss, train_time))

    dev_sent_f1, _, _, dev_act_f1, _, _, dev_time = evaluate(
        model, data_house.get_iterator("dev", args.batch_size, False), metric)
    test_sent_f1, sent_r, sent_p, test_act_f1, act_r, act_p, test_time = evaluate(
        model, data_house.get_iterator("test", args.batch_size, False), metric)

    print("On dev, sentiment f1: {:.4f}, act f1: {:.4f}".format(
        dev_sent_f1, dev_act_f1))
    print("On test, sentiment f1: {:.4f}, act f1 {:.4f}".format(
        test_sent_f1, test_act_f1))
    print("Dev and test cost {:.4f} s.\n".format(dev_time + test_time))
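The best-score variables initialized before the loop (dev_best_sent, dev_best_act, and the test_* counterparts) are not updated in this excerpt. A purely hypothetical sketch of the usual bookkeeping, which would sit inside the epoch loop and keep the test scores from the best dev epochs:

# Hypothetical continuation (not shown in the excerpt), inside the epoch loop:
if dev_sent_f1 > dev_best_sent:
    dev_best_sent = dev_sent_f1
    test_sent_sent, test_sent_act = test_sent_f1, test_act_f1
if dev_act_f1 > dev_best_act:
    dev_best_act = dev_act_f1
    test_act_sent, test_act_act = test_sent_f1, test_act_f1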
Code example #5
def problem_solving(nb, problem, problem_languages, args, time_start):

    if problem_languages[nb] == "pl":
        pass

    print(problem)
    local_path = get_problem_truth(args.c, problem)
    print(local_path)
    problem_collection, number_of_texts = tagging_problem(
        local_path, problem_languages[nb])

    print('tagged')

    authors = make_authors_list(problem_collection)
    print('authors defined')

    freq1 = args.freq1
    freq2 = args.freq2

    training_set_size, test_set_size = set_sizes(problem_collection)

    random.seed(time.time())

    trunc_words1, trunc_words2 = create_char_ngrams_stat(
        problem_collection, freq2, problem_languages[nb])

    problem_collection = filter_problem_corpus(problem_collection,
                                               trunc_words1, trunc_words2,
                                               problem_languages[nb])

    problem_collection, nb_categories = create_ngrams_and_splitgrams(
        problem_collection)

    words_encoder, words_num = stats_for_ngrams_and_skipgrams(
        problem_collection, nb_categories, freq1)
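    # NOTE: frequency in the call below is not defined in this snippet; it is
    # presumably a module-level constant or argument in the original project.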

    freq_feature, words_num = vectorise_problem_corpus(problem_collection,
                                                       words_encoder,
                                                       words_num, frequency,
                                                       number_of_texts)

    freq_feature_form_norm, network_sizes = compute_mean_and_std(
        freq_feature, problem_collection, words_num)

    model_test = define_model(network_sizes, len(authors), len(words_encoder))
    optimiser_test = define_optimiser(model_test)
    bceloss = torch.nn.NLLLoss()
    if use_cuda:
        bceloss = bceloss.cuda()

    mseloss = torch.nn.MSELoss()
    if use_cuda:
        mseloss = mseloss.cuda()

    model = training(model_test, training_set_size, problem_collection,
                     authors, bceloss, optimiser_test, freq_feature_form_norm)

    print('after training')

    result = testing(problem_collection, model, authors,
                     freq_feature_form_norm)

    print('after testing')

    with open(os.path.join(args.o, 'answers-{}.json'.format(problem)),
              'w') as outfile:
        json.dump(result, outfile)

    time_now = time.time()

    timing = time_now - time_start
    print(as_minutes(timing))

    print('sdadkashdksadfksahfksafhksadhf')
    return
Code example #6
def run_test_train_dataset(selected_model):

    # Create the session, with options for better GPU usage
    print("Create session")
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # Do not grab all GPU memory up front; grow as needed
    config.allow_soft_placement = True  # If an op cannot run on the assigned device, let it fall back to another
    sess = tf.Session(config=config)

    with tf.device('/cpu:0'):
        #input setup
        #train dataset setup
        train_dataset = train_dataset_prepare(FLAGS)
        train_iterator = train_dataset.make_one_shot_iterator()
        #test dataset setup
        test_dir = '/scratch_net/ofsoundof/yawli/BSD68/'
        test_iterator = test_dataset_prepare(
            test_path_prepare, test_dir,
            FLAGS.sigma).make_initializable_iterator()
        #create dataset handle and iterator
        handle = tf.placeholder(tf.string, shape=[])
        iterator = tf.data.Iterator.from_string_handle(
            handle, train_dataset.output_types, train_dataset.output_shapes)
        image_gt, image_n = iterator.get_next()

    with tf.device('/device:GPU:' + FLAGS.sge_gpu):
        image_gt = image_gt / 255 * MAX_RGB
        image_n = image_n / 255 * MAX_RGB

        global_step = tf.Variable(1, name='global_step', trainable=False)
        image_dn = selected_model(image_n, FLAGS)
        MSE_dn, PSNR_dn = comp_mse_psnr(image_dn, image_gt, MAX_RGB)
        MSE_n, PSNR_n = comp_mse_psnr(image_n, image_gt, MAX_RGB)
        PSNR_gain = PSNR_dn - PSNR_n
        loss = tf.reduce_sum(tf.squared_difference(
            image_gt, image_dn)) + tf.reduce_sum(
                tf.losses.get_regularization_losses())
        train_op, global_step = training(loss, FLAGS, global_step)

    #train summary
    tf.summary.scalar('MSE_dn', MSE_dn)
    tf.summary.scalar('PSNR_dn', PSNR_dn)
    tf.summary.scalar('MSE_n', MSE_n)
    tf.summary.scalar('PSNR_n', PSNR_n)
    tf.summary.scalar('PSNR_gain', PSNR_gain)
    slim.summarize_collection(collection=tf.GraphKeys.TRAINABLE_VARIABLES)
    #test summaries
    psnr_validate = tf.placeholder(tf.float32)
    tf.summary.scalar('psnr_validate', psnr_validate)
    merged = tf.summary.merge_all()

    #Get the dataset handle
    train_handle = sess.run(train_iterator.string_handle())
    test_handle = sess.run(test_iterator.string_handle())

    print("Create checkpoint directory")
    # if not FLAGS.restore:
    FLAGS.checkpoint = FLAGS.checkpoint + '_' + datetime.datetime.now(
    ).strftime("%Y-%m-%d-%H-%M-%S")
    with open('checkpoint.txt', 'w'
              ) as text_file:  #save at the current directory, used for testing
        text_file.write(FLAGS.checkpoint)
    LOG_DIR = os.path.join('/scratch_net/ofsoundof/yawli/logs',
                           FLAGS.checkpoint)
    # LOG_DIR = os.path.join('/home/yawli/Documents/hashnets/logs', FLAGS.checkpoint )
    # assert (not os.path.exists(LOG_DIR)), 'LOG_DIR %s already exists'%LOG_DIR

    print("Create summary file writer")
    train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                         sess.graph)

    print("Create saver")
    saver = tf.train.Saver(max_to_keep=100)

    #Always init, then optionally restore
    print("Initialization")
    sess.run(tf.global_variables_initializer())

    if FLAGS.restore:
        print('Restore model from checkpoint {}'.format(
            tf.train.latest_checkpoint(LOG_DIR)))
        saver.restore(sess, tf.train.latest_checkpoint(LOG_DIR))
        checkpoint_filename = 'checkpoint'
    else:
        checkpoint_filename = 'checkpoint'
    # checkpoint_filename = 'checkpoint'
    score_all = []
    model_index = []
    image_path_dn, _ = test_path_prepare(test_dir, FLAGS.sigma)
    image_path_dn = [
        os.path.join(LOG_DIR, os.path.basename(i)) for i in image_path_dn
    ]
    i = sess.run(global_step) + 1
    print("Start iteration")
    while i <= FLAGS.max_iter:
        if i % FLAGS.summary_interval == 0:
            # evaluate on the BSD68 test set
            sess.run(test_iterator.initializer)
            score_per = np.zeros((4, 69))
            for j in range(68):

                img_dn, img_n, img_gt = sess.run(
                    [image_dn, image_n, image_gt],
                    feed_dict={handle: test_handle})
                scipy.misc.toimage(np.squeeze(img_dn) * 255 / MAX_RGB,
                                   cmin=0,
                                   cmax=255).save(image_path_dn[j])
                # Recent scipy versions normalize images so that min(data) becomes black and max(data)
                # becomes white. This is unwanted if the data should be exact grey levels or exact RGB channels.
                # The solution: scipy.misc.toimage(image_array, cmin=0.0, cmax=255).save('...') or use the imageio library
                # from IPython import embed; embed();
                score = image_converter(np.squeeze(img_dn), np.squeeze(img_n),
                                        np.squeeze(img_gt), False, 0, MAX_RGB)
                score_per[:, j] = score
            score_per[:, -1] = np.mean(score_per[:, :-1], axis=1)
            print('PSNR results for BSD68: denoised {}, noisy {}'.format(
                score_per[1, -1], score_per[0, -1]))

            time_start = time.time()
            [_, i, l, mse_dn, mse_n, psnr_dn, psnr_n, psnr_gain, summary] =\
                sess.run([train_op, global_step, loss, MSE_dn, MSE_n, PSNR_dn, PSNR_n, PSNR_gain, merged],
                         feed_dict={handle: train_handle, psnr_validate: score_per[1, -1]})#, options=options, run_metadata=run_metadata)
            # from IPython import embed; embed(); exit()
            duration = time.time() - time_start
            train_writer.add_summary(summary, i)
            print(
                "Iter %d, total loss %.5f; denoise (mse %.5f, psnr %.5f); noise (mse %.5f, psnr %.5f); psnr_gain:%f"
                % (i - 1, l, mse_dn, psnr_dn, mse_n, psnr_n, psnr_gain))
            print('Training time for one iteration: {}'.format(duration))

            if (i - 1) % FLAGS.checkpoint_interval == 0:
                save_path = saver.save(sess,
                                       os.path.join(LOG_DIR, 'model'),
                                       global_step=i,
                                       latest_filename=checkpoint_filename,
                                       write_meta_graph=True)
                print("Saved checkpoint to {}".format(save_path))
                print("Flushing train writer")
                train_writer.flush()

            model_index.append(i)
            score_all.append([
                score_per[0, -1], score_per[1, -1],
                score_per[1, -1] - score_per[0, -1], score_per[2, -1],
                score_per[3, -1], score_per[3, -1] - score_per[2, -1]
            ])
        else:
            [_, i, _] = sess.run([train_op, global_step, loss],
                                 feed_dict={handle: train_handle})

    #write to csv files
    descriptor5 = 'Average PSNR (dB)/SSIM for Set5, Scale ' + str(
        FLAGS.upscale) + ', different model parameters during training' + '\n'
    descriptor = [descriptor5, '', '', '', '', '', '']
    header_mul = ['Iteration'] + ['N', 'DN', 'Gain'] * 2
    model_index_array = np.expand_dims(np.array(model_index), axis=0)
    score_all_array = np.transpose(np.array(score_all))
    written_content = np.concatenate((model_index_array, score_all_array),
                                     axis=0)
    written_content_fmt = ['{:<8}'] + ['{:<8.4f}', '{:<8.4f}', '{:<8.4f}'] * 2
    content = content_generator_mul_line(descriptor, written_content,
                                         written_content_fmt, header_mul)
    file_writer(os.path.join(LOG_DIR, 'psnr_ssim.csv'), 'a', content)

    #write to pickle files
    variables = {
        'index': np.array(model_index),
        'noise': score_all_array[0, :],
        'denoise': score_all_array[1, :],
        'gain': score_all_array[2, :]
    }
    pickle_out = open(os.path.join(LOG_DIR, 'psnr_bsd68.pkl'), 'wb')
    pickle.dump(variables, pickle_out)
    pickle_out.close()
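scipy.misc.toimage was removed in SciPy 1.2, so the save call above no longer works on current SciPy. A minimal sketch of the imageio-based replacement that the inline comment alludes to, assuming 8-bit grayscale output:

import imageio
import numpy as np

# Clip to [0, 255] and save as uint8, avoiding any per-image renormalization.
img_out = np.clip(np.squeeze(img_dn) * 255 / MAX_RGB, 0, 255).astype(np.uint8)
imageio.imwrite(image_path_dn[j], img_out)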
Code example #7
                      dim=1).squeeze()  # size: batch_size * 600
        p = self.drop(p)
        out = self.linear(p).squeeze()

        return out


### load the SST dataset ###
train_iter, val_iter, test_iter, TEXT, LABEL = loadSST()

### train the model ###

# CNN-non-static
model = CNN(len(TEXT.vocab), TEXT.vocab.vectors.size(1), TEXT.vocab.vectors)
optimizer = optim.Adadelta(model.parameters(), lr=0.1)

# CNN-static
# model = CNN(len(TEXT.vocab),TEXT.vocab.vectors.size(1),TEXT.vocab.vectors,static=True)
# optimizer = optim.Adadelta(filter(lambda p: p.requires_grad, model.parameters()),lr=0.1)

# CNN-multichannel
# model = CNN_2channel(len(TEXT.vocab),TEXT.vocab.vectors.size(1),TEXT.vocab.vectors)
# optimizer = optim.Adadelta(filter(lambda p: p.requires_grad, model.parameters()),lr=0.1)

num_epoch = 10
model = training(train_iter, model, num_epoch, optimizer)

### test the model ###

test_loss, accuracy_test = testing(test_iter, model)
Code example #8
def problem_solving(nb, problem, problem_languages, args, time_start):

    if True:

        #problem = 'problem00001'
        #nb = 0
        if problem_languages[nb] == "pl":
            pass  #continue
        #if (nb != 0 and nb != 0):
        #    continue

        print(problem)
        local_path = get_problem_truth(args.c, problem)
        print(local_path)
        #global problem_collection
        problem_collection, number_of_texts = tagging_problem(
            local_path, problem_languages[nb])

        print('tagged')

        #gc.collect()
        #save_tools(problem_collection_, 'problem_collection_anfang')
        #save_tools(number_of_texts, 'number_of_texts')

        #problem_collection_ = load_tools('problem_collection_anfang')
        #number_of_texts = load_tools('number_of_texts')
        #save_tools(number_of_texts, 'number_of_texts')
        authors = make_authors_list(problem_collection)
        print('authors defined')

        #quit()
        results = []

        #frequency = random.choice([200,220,240,260,280,300])
        #freq1 = random.choice([100,150,200,250,300,350])
        #freq2 =  random.choice([100,150,200,250,300,350])
        #frequency_ = [200,220,240,260,280,300]
        #freq1_ = [100,150,200,250,300,350]
        #freq2_ =  [100,150,200,250,300,350]

        if True:  #for x in range(1):
            #break
            #problem_collection = copy.deepcopy(problem_collection_)

            #frequency = 3000#random.choice([500,600,800,1000,1200,1500])
            #freq1 = 100#random.choice([100,150,200,250,300,350])
            #freq2 =  200#random.choice([100,150,200,250,300,350])

            #training_set_size, test_set_size = set_sizes(problem_collection)

            #random.seed(time.time())

            #print(frequency, freq1, freq2)
            #trunc_words1, trunc_words2 = create_char_ngrams_stat(problem_collection, freq1, freq2, problem_languages[nb])

            #problem_collection = filter_problem_corpus(problem_collection, trunc_words1, trunc_words2, problem_languages[nb])

            #problem_collection, nb_categories = create_ngrams_and_splitgrams(problem_collection)

            #words_encoder, words_num = stats_for_ngrams_and_skipgrams(problem_collection, nb_categories, frequency)

            #problem_collection, freq_feature, words_num = vectorise_problem_corpus(problem_collection, words_encoder, words_num, frequency, number_of_texts)

            #freq_feature_form_norm, pca, network_sizes = compute_mean_and_std(freq_feature, problem_collection,words_num)
            ################################
            #noisy_labels = cluster_test(problem_collection, len(freq_feature), authors, freq_feature_form_norm)

            frequency = 8000  #random.choice([500,600,800])
            freq1 = 400  #random.choice([100,150,200,250,300,350])
            freq2 = 1000  #random.choice([100,150,200,250,300,350])

            training_set_size, test_set_size = set_sizes(problem_collection)

            random.seed(time.time())

            print(frequency, freq1, freq2)
            #del problem_collection
            trunc_words1, trunc_words2 = create_char_ngrams_stat(
                problem_collection, freq1, freq2, problem_languages[nb])

            problem_collection = filter_problem_corpus(problem_collection,
                                                       trunc_words1,
                                                       trunc_words2,
                                                       problem_languages[nb])

            problem_collection, nb_categories = create_ngrams_and_splitgrams(
                problem_collection)

            words_encoder, words_num = stats_for_ngrams_and_skipgrams(
                problem_collection, nb_categories, frequency)

            freq_feature, words_num = vectorise_problem_corpus(
                problem_collection, words_encoder, words_num, frequency,
                number_of_texts)

            freq_feature_form_norm, pca, network_sizes = compute_mean_and_std(
                freq_feature, problem_collection, words_num)

            #result = cluster_test(problem_collection, len(freq_feature), authors, freq_feature_form_norm)

            #save_tools(problem_collection, 'problem_collection')
            #save_tools(words_encoder, 'words_encoder')
            #save_tools(words_num, 'words_num')
            #save_tools(freq_feature, 'freq_feature')

            #problem_collection = load_tools('problem_collection')
            #words_encoder = load_tools('words_encoder')
            #words_num = load_tools('words_num')
            #freq_feature = load_tools('freq_feature')
            #print('tutaj')

            #global model_test
            #model_train = define_model(network_sizes, len(authors), freq_feature_form_norm,len(words_encoder))
            model_test = define_model(network_sizes,
                                      len(authors), freq_feature_form_norm,
                                      len(words_encoder))
            #model = define_model(network_sizes, len(authors), freq_feature_form_norm,len(words_encoder))

            #global optimiser_test

            #optimiser_train = define_optimiser(model_train)
            optimiser_test = define_optimiser(model_test)
            bceloss = torch.nn.NLLLoss()
            if use_cuda:
                bceloss = bceloss.cuda()

            mseloss = torch.nn.MSELoss()
            if use_cuda:
                mseloss = mseloss.cuda()

            #global model
            model = training([None, model_test], training_set_size,
                             problem_collection, authors, bceloss, mseloss,
                             (None, optimiser_test), freq_feature_form_norm,
                             None)

            print('after training')

            result = testing(problem_collection, model, authors,
                             freq_feature_form_norm, None)

            print('after testing')

            with open(os.path.join(args.o, 'answers-{}.json'.format(problem)),
                      'w') as outfile:
                json.dump(result, outfile)

            #results.append(result)

            del model_test, optimiser_test, bceloss, mseloss, outfile
            #gc.collect()
            del freq_feature_form_norm, pca, network_sizes, result, freq_feature, words_num
            #gc.collect()
            del trunc_words1, trunc_words2, nb_categories, words_encoder, training_set_size, test_set_size
            #gc.collect()
            del problem_collection, model
            #del globals()['problem_collection'], globals()['model']
            #del globals()['optimiser_test']
            #del globals()['model_test']
            #gc.collect()
            time_now = time.time()

            timing = time_now - time_start
            print(as_minutes(timing))

            #gc.collect()

        del number_of_texts, authors
        gc.collect()

        #save_tools(results, problem)
        #quit()

        #quit()

    print('sdadkashdksadfksahfksafhksadhf')
    return