def generate_model(self):
        print("Gathering and processing tweets...")
        # Get the list of (username, label) tuples (shuffled later, before the split)
        tuple_list = usermapping.data_tuples.items()

        # Split and grab tweets for users
        results = utils.flatten([ self.fetch_data(t)
                                  for t in tuple_list ])
         
        # TODO: Cross-validation generation
        # 85% / 15% train/test split after shuffling
        n_train = int(len(results) * 0.85)
        shuffle(results)
        print(len(results))
        print(n_train)
        train = results[:n_train]
        test = results[n_train:]

        # Instantiate and train classifier
        print("Training...")
        cl = NaiveBayesClassifier(train)
        cl.train()
        
        # Save model
        print("Saving model...")
        utils.save_model(cl)

        # Classify test
        print("Testing...")
        print("Accuracy: {0}".format(cl.accuracy(test)))
        return cl
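# Note: every example on this page calls a project-specific save_model/load_model
# helper with its own signature. Purely as an illustrative reference (an assumption
# for this page, not the utils module of any project shown here), a minimal
# pickle-based pair could look like this:
import pickle

def save_model(model, path):
    # Serialize an arbitrary Python object to disk.
    with open(path, 'wb') as f:
        pickle.dump(model, f)

def load_model(path):
    # Restore an object previously written by save_model.
    with open(path, 'rb') as f:
        return pickle.load(f)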
def main(model='lenet', num_epochs=500, min_epochs=100, improve_epochs=50,
         subset_sizes=None, validation_intervals=1,
         batchsize=500,
         sample_chooser=None, refine=False, out_path=None,
         csv_path=None, indices_out_path=None):
    if subset_sizes is None:
        subset_sizes = [500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000]

    subset_sizes = cmdline_helpers.coerce_num_list_parameter(subset_sizes,
                                                             num_type=int, name='subset_sizes')
    num_epochs = cmdline_helpers.coerce_num_list_parameter(num_epochs, N=len(subset_sizes),
                                                                  num_type=int, name='num_epochs')
    min_epochs = cmdline_helpers.coerce_num_list_parameter(min_epochs, N=len(subset_sizes),
                                                                  num_type=int, name='min_epochs')
    improve_epochs = cmdline_helpers.coerce_num_list_parameter(improve_epochs, N=len(subset_sizes),
                                                                  num_type=int, name='improve_epochs')
    validation_intervals = cmdline_helpers.coerce_num_list_parameter(validation_intervals, N=len(subset_sizes),
                                                                  num_type=int, name='validation_intervals')

    N_train, N_val, N_test = mnist_dataset.train_val_test_size()

    builder = mnist_architecture.network_builder(model)

    mnist = mnist_dataset.MNISTTrainValTest()

    trainer, indices_labelled_history, validation_error_history, test_error_history = \
        active_learning.active_learning_image_classifier(sample_chooser=sample_chooser, model_builder=builder, N_train=N_train,
                                                         batchsize=batchsize,
                                                         refine=refine, datasets_fn=mnist.datasets, subset_sizes=subset_sizes,
                                                         num_epochs=num_epochs,
                                                         min_epochs=min_epochs, improve_epochs=improve_epochs,
                                                         validation_intervals=validation_intervals,
                                                         batch_xform_fn=mnist_dataset.xform_mnist_batch,
                                                         n_train_repetitions_in_case_of_failure=3)

    print('Results:')
    print('N-train\t\tErr')
    for labelled_indices, err in zip(indices_labelled_history, test_error_history):
        print('{0}\t\t{1:.2f}%'.format(labelled_indices.shape[0], err * 100.0))

    if csv_path is not None:
        # Open in text mode with newline='' so the csv module behaves correctly under Python 3
        with open(csv_path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['# samples', 'Error %'])
            for labelled_indices, err in zip(indices_labelled_history, test_error_history):
                writer.writerow([labelled_indices.shape[0], err * 100.0])

    if out_path is not None:
        print('Saving model to {0} ...'.format(out_path))
        utils.save_model(out_path, trainer.network)

    if indices_out_path is not None:
        np.save(indices_out_path, indices_labelled_history[-1])
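# Standalone note on the CSV export step above: in Python 3 the file must be opened
# in text mode with newline='' for the csv module. A minimal, self-contained version
# with placeholder results (the file name and numbers are assumptions, not project data):
import csv

results = [(500, 0.042), (1000, 0.031), (1500, 0.027)]   # (num labelled, error fraction) placeholders

with open('active_learning_results.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['# samples', 'Error %'])
    for n_labelled, err in results:
        writer.writerow([n_labelled, err * 100.0])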
Example #3
File: models.py  Project: dmrd/dnn
    def pretrain(self, data,
                 epoch=[3000, 3000],
                 v_damping=[0.3, 1e-10],
                 w_init=[0.01, 0.1],
                 lr=[0.001, 0.002],
                 batch_size=64,
                 lr_schedule=trainers.lr_slow_start,
                 rbml1=None, rbml2=None,
                 rbml1_path=None, rbml2_path=None,
                 checkpoint=None):
        if rbml1 is None:
            rbml1 = ShapeRBM(self.num_v, self.num_h1, self.patches,
                             model_stat=trainers.CD_model(),
                             data=data,
                             v_damping=v_damping[0],
                             w_init=w_init[0],
                             double_up=True)
            rbml1.train(lr=lr[0], epoch=epoch[0], batch_size=batch_size,
                        data=data, lr_schedule=lr_schedule,
                        checkpoint=checkpoint)
            if rbml1_path:
                utils.save_model(rbml1, rbml1_path)
        self.rbml1 = rbml1

        if rbml2 is None:
            data_l2 = rbml1.expectation(1, [data, None])
            rbml2 = ShapeRBM(self.num_h1, self.num_h2,
                             patches=[slice(None, None, None)],
                             model_stat=trainers.CD_model(),
                             data=data_l2,
                             v_damping=v_damping[1],
                             w_init=w_init[1],
                             double_down=True)
            rbml2.train(lr=lr[1], epoch=epoch[1], batch_size=batch_size,
                        data=data_l2, lr_schedule=lr_schedule,
                        checkpoint=checkpoint)
            if rbml2_path:
                utils.save_model(rbml2, rbml2_path)
        self.rbml2 = rbml2

        # Combine parameters to full dbm
        self.connections[0].W = rbml1.connections[0].W.copy()
        self.connections[1].W = rbml2.connections[0].W.copy()
        self.layers[0].bias = rbml1.layers[0].bias.copy()
        self.layers[1].bias = rbml1.layers[1].bias + rbml2.layers[0].bias
        self.layers[2].bias = rbml2.layers[1].bias.copy()
        return
Example #4
def evaluate(model, current_epoch, additional_test_length):
    # Evaluate the model
    logging.info('Evaluate')
    test_x = test_data['x']
    test_y = test_data['y']
    test_mask = test_data['mask']
    lengths = test_data['lengths']
    logging.info('-----------Evaluate Normal:{},{},{}-------------------'.format(MODEL_TYPE, DATA_TYPE, N_HIDDEN))
    do_evaluate(test_x, test_y, test_mask, lengths, test_data['t'] if USE_TIME_INPUT else None, test_batch=TEST_BATCH)
    # Evaluate the model on short data
    if additional_test_length > 0:
        logging.info('-----------Evaluate Additional---------------')
        test_x, test_y, test_mask, lengths, test_t = get_short_test_data(additional_test_length)
        do_evaluate(test_x, test_y, test_mask, lengths, test_t, test_batch=TEST_BATCH)
    logging.info('-----------Evaluate End----------------------')
    if not DEBUG:
        utils.save_model('{}-{}-{}-{}'.format(MODEL_TYPE, current_epoch, DATA_TYPE, N_HIDDEN),
                         str(datetime.datetime.now()), model, '_new')
def train_lstm(model, input_path, validation_path, save_dir, step=3, batch_size=1024,
               iters=1000, save_every=1):
    _, seqlen, _ = model.input_shape
    train_gen = generate_arrays_from_file(input_path, seqlen=seqlen,
                                    step=step, batch_size=batch_size)
    samples, seed = next(train_gen)

    logger.info('samples per epoch %s' % samples)
    print('samples per epoch %s' % samples)
    last_epoch = model.metadata.get('epoch', 0)

    for epoch in range(last_epoch + 1, last_epoch + iters + 1):
        val_gen = generate_arrays_from_file(
            validation_path, seqlen=seqlen, step=step, batch_size=batch_size)
        val_samples, _ = next(val_gen)

        hist = model.fit_generator(
            train_gen,
            validation_data=val_gen,
            validation_steps=val_samples // batch_size,
            steps_per_epoch=samples // batch_size,
            epochs=1)

        val_loss = hist.history.get('val_loss', [-1])[0]
        loss = hist.history['loss'][0]
        model.metadata['loss'].append(loss)
        model.metadata['val_loss'].append(val_loss)
        model.metadata['epoch'] = epoch

        message = 'loss = %.4f   val_loss = %.4f' % (loss, val_loss)
        print(message)
        logger.info(message)
        print('done fitting epoch %s' % epoch)
        if epoch % save_every == 0:
            save_path = os.path.join(save_dir, ('epoch_%s' % ('%s' % epoch).zfill(5)))
            logger.info("done fitting epoch %s  Now saving mode to %s" % (epoch, save_path))
            save_model(model, save_path)
            logger.info("saved model, now generating a sample")

        generate_and_print(model, seed, 0.5, 1000)
def main(model='lenet', num_epochs=500, min_epochs=200, improve_epochs=250, batchsize=500,
         training_subset=None, aug_factor=1, out_path=None):
    # Load the dataset
    print("Loading data...")
    mnist = mnist_dataset.MNISTTrainValTest()

    # Generate the indices of the subset of the training set
    training_subset_indices = None
    if training_subset is not None:
        training_subset_indices = mnist.balanced_train_subset_indices(training_subset)

    # Get the train, validation and test sets
    train_ds, val_ds, test_ds = mnist.datasets(training_subset_indices)

    # Get network builder function for named model
    builder = mnist_architecture.network_builder(model)

    # Build the image classifier for the given model builder
    clf = image_classifier.ImageClassifier.for_model(builder)

    # Set verbosity
    clf.trainer.report(verbosity=trainer.VERBOSITY_EPOCH)

    # Set data transformation function
    clf.trainer.data_xform_fn(batch_xform_fn=mnist_dataset.xform_mnist_batch)

    # Set training length
    clf.trainer.train_for(num_epochs=num_epochs, min_epochs=min_epochs, val_improve_num_epochs=improve_epochs,
                          val_improve_epochs_factor=0)

    # Train
    clf.trainer.train(train_ds, val_ds, test_ds, batchsize=batchsize)

    if out_path is not None:
        print('Saving model to {0} ...'.format(out_path))
        utils.save_model(out_path, clf.network)
Example #7
        ngrams = generate_ngram(word_list, 3)
        for d in ngrams:
            root.add(d)
    print('------> Insertion complete')


if __name__ == "__main__":
    root_name = basedir + "/data/root.pkl"
    stopwords = get_stopwords()
    if os.path.exists(root_name):
        root = load_model(root_name)
    else:
        dict_name = basedir + '/data/dict.txt'
        word_freq = load_dictionary(dict_name)
        root = TrieNode('*', word_freq)
        save_model(root, root_name)

    # Load the new article
    filename = 'data/demo.txt'
    data = load_data(filename, stopwords)
    # Insert the new article into the root trie
    load_data_2_root(data)

    # Take the top N (here 5) candidates
    topN = 5
    result, add_word = root.find_word(topN)
    # To debug or try a different threshold, print `result` and adjust
    # print("\n----\n", result)
    print("\n----\n", 'Added %d new words; the words and their scores are: \n' % len(add_word))
    print('#############################')
    for word, score in add_word.items():
        print(word, score)
Example #8
# Imports restored for this truncated snippet (PCA is assumed to be scikit-learn's)
import os
import sys

import numpy as np
import matplotlib
from sklearn.decomposition import PCA

matplotlib.use('Agg')
import matplotlib.pyplot as plt

import utils

train_csv_path = sys.argv[1]
model_path = sys.argv[2]

trainX, trainY = utils.load_train_data(train_csv_path)
print(f'\033[32;1mtrainX: {trainX.shape}, trainY: {trainY.shape}\033[0m')

if os.path.exists(model_path):
    model = utils.load_model(model_path)
else:
    model = PCA(n_components=2).fit(trainX)
    utils.save_model(model_path, model)

trainX2 = model.transform(trainX)
model = PCA(n_components=10).fit(trainX)
print('%.3f ' * len(model.explained_variance_) %
      tuple(model.explained_variance_),
      '%.3f ' * len(model.explained_variance_ratio_) %
      tuple(model.explained_variance_ratio_),
      sep='\n')

for c in np.unique(trainY):
    plt.scatter(trainX2[trainY == c, 0],
                trainX2[trainY == c, 1],
                s=0.1,
                label=str(c))
plt.legend()
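# Self-contained sketch of the fit-or-load caching pattern used above, with synthetic
# data and pickle standing in for utils.save_model/load_model (the cache file name
# and the data are placeholders, not parts of the original script).
import os
import pickle

import numpy as np
from sklearn.decomposition import PCA

cache_path = 'pca_demo.pkl'
X = np.random.RandomState(0).randn(200, 16)

if os.path.exists(cache_path):
    with open(cache_path, 'rb') as f:
        pca = pickle.load(f)
else:
    pca = PCA(n_components=2).fit(X)
    with open(cache_path, 'wb') as f:
        pickle.dump(pca, f)

X2 = pca.transform(X)
print(X2.shape, pca.explained_variance_ratio_)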
Example #9
File: main.py  Project: ypycsy/CDTL-PSE
def train(X_train,X_dev,X_test):
    # load data sets
    train_sentences = X_train
    dev_sentences = X_dev
    test_sentences = X_test

    train_sentences_loc = load_sentences(FLAGS.train_file_loc, FLAGS.lower, FLAGS.zeros)
    dev_sentences_loc = load_sentences(FLAGS.dev_file_loc, FLAGS.lower, FLAGS.zeros)
    test_sentences_loc = load_sentences(FLAGS.test_file_loc, FLAGS.lower, FLAGS.zeros)
    train_sentences_org = load_sentences(FLAGS.train_file_org, FLAGS.lower, FLAGS.zeros)
    dev_sentences_org = load_sentences(FLAGS.dev_file_org, FLAGS.lower, FLAGS.zeros)
    test_sentences_org = load_sentences(FLAGS.test_file_org, FLAGS.lower, FLAGS.zeros)
    train_sentences_per = load_sentences(FLAGS.train_file_per, FLAGS.lower, FLAGS.zeros)
    dev_sentences_per = load_sentences(FLAGS.dev_file_per, FLAGS.lower, FLAGS.zeros)
    test_sentences_per = load_sentences(FLAGS.test_file_per, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)

    update_tag_scheme(train_sentences_loc, FLAGS.tag_schema)
    update_tag_scheme(test_sentences_loc, FLAGS.tag_schema)
    update_tag_scheme(train_sentences_per, FLAGS.tag_schema)
    update_tag_scheme(test_sentences_per, FLAGS.tag_schema)
    update_tag_scheme(train_sentences_org, FLAGS.tag_schema)
    update_tag_scheme(test_sentences_org, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences])
                )
            )
            dico_chars_train_loc = char_mapping(train_sentences_loc, FLAGS.lower)[0]
            dico_chars_loc, char_to_id_loc, id_to_char_loc = augment_with_pretrained(
                dico_chars_train_loc.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences_loc])
                )
            )
            dico_chars_train_per = char_mapping(train_sentences_per, FLAGS.lower)[0]
            dico_chars_per, char_to_id_per, id_to_char_per = augment_with_pretrained(
                dico_chars_train_per.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences_per])
                )
            )
            dico_chars_train_org = char_mapping(train_sentences_org, FLAGS.lower)[0]
            dico_chars_org, char_to_id_org, id_to_char_org = augment_with_pretrained(
                dico_chars_train_org.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences_org])
                )
            )
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences, FLAGS.lower)
            _c_loc, char_to_id_loc, id_to_char_loc = char_mapping(train_sentences_loc, FLAGS.lower)
            _c_per, char_to_id_per, id_to_char_per = char_mapping(train_sentences_per, FLAGS.lower)
            _c_org, char_to_id_org, id_to_char_org = char_mapping(train_sentences_org, FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        _t_loc, tag_to_id_loc, id_to_tag_loc = tag_mapping(train_sentences_loc)
        _t_per, tag_to_id_per, id_to_tag_per = tag_mapping(train_sentences_per)
        _t_org, tag_to_id_org, id_to_tag_org = tag_mapping(train_sentences_org)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag,
                         char_to_id_loc, id_to_char_loc, tag_to_id_loc, id_to_tag_loc,
                         char_to_id_per, id_to_char_per, tag_to_id_per, id_to_tag_per,
                         char_to_id_org, id_to_char_org, tag_to_id_org, id_to_tag_org], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            (char_to_id, id_to_char, tag_to_id, id_to_tag,
             char_to_id_loc, id_to_char_loc, tag_to_id_loc, id_to_tag_loc,
             char_to_id_per, id_to_char_per, tag_to_id_per, id_to_tag_per,
             char_to_id_org, id_to_char_org, tag_to_id_org, id_to_tag_org) = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(
        train_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    dev_data = prepare_dataset(
        dev_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    test_data = prepare_dataset(
        test_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    print("%i / %i / %i sentences in train / dev / test." % (
        len(train_data),len(dev_data), len(test_data)))
    train_data_loc = prepare_dataset_ner(
        train_sentences_loc, char_to_id_loc, tag_to_id_loc, FLAGS.lower
    )
    dev_data_loc = prepare_dataset_ner(
        dev_sentences_loc, char_to_id_loc, tag_to_id_loc, FLAGS.lower
    )
    test_data_loc = prepare_dataset_ner(
        test_sentences_loc, char_to_id_loc, tag_to_id_loc, FLAGS.lower
    )
    print("%i / %i / %i sentences_loc in train / dev / test." % (
        len(train_data_loc), len(dev_data_loc), len(test_data_loc)))
    train_data_per = prepare_dataset_ner(
        train_sentences_per, char_to_id_per, tag_to_id_per, FLAGS.lower
    )
    dev_data_per = prepare_dataset_ner(
        dev_sentences_per, char_to_id_per, tag_to_id_per, FLAGS.lower
    )
    test_data_per = prepare_dataset_ner(
        test_sentences_per, char_to_id_per, tag_to_id_per, FLAGS.lower
    )
    print("%i / %i / %i sentences_per in train / dev / test." % (
        len(train_data_per), len(dev_data_per), len(test_data_per)))
    train_data_org = prepare_dataset_ner(
        train_sentences_org, char_to_id_org, tag_to_id_org, FLAGS.lower
    )
    dev_data_org = prepare_dataset_ner(
        dev_sentences_org, char_to_id_org, tag_to_id_org, FLAGS.lower
    )
    test_data_org = prepare_dataset_ner(
        test_sentences_org, char_to_id_org, tag_to_id_org, FLAGS.lower
    )
    print("%i / %i / %i sentences_org in train / dev / test." % (
        len(train_data_org), len(dev_data_org), len(test_data_org)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)

    train_manager_loc = BatchManager(train_data_loc, FLAGS.batch_size)
    train_manager_per = BatchManager(train_data_per, FLAGS.batch_size)
    train_manager_org = BatchManager(train_data_org, FLAGS.batch_size)
    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id, char_to_id_loc, tag_to_id_loc,
                              char_to_id_per, tag_to_id_per, char_to_id_org, tag_to_id_org)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    steps_per_epoch_loc = train_manager_loc.len_data
    steps_per_epoch_per = train_manager_per.len_data
    steps_per_epoch_org = train_manager_org.len_data
    model = create_model(Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, id_to_char_loc, id_to_char_per, id_to_char_org, logger)

    with tf.Session(config=tf_config, graph=model.graph) as sess:

        sess.run(tf.global_variables_initializer())
        if config["pre_emb"]:
            emb_weights = sess.run(model.char_lookup.read_value())
            emb_weights_ner = sess.run(model.char_lookup.read_value())
            emb_weights, emb_weights_ner = load_word2vec(config["emb_file"], id_to_char, id_to_char_loc,id_to_char_per,id_to_char_org, config["char_dim"],
                                                    emb_weights, emb_weights_ner)
            sess.run(model.char_lookup.assign(emb_weights))
            logger.info("Load pre-trained embedding.")
        logger.info("start training")
        loss = []
        loss_loc = []
        loss_per = []
        loss_org = []
        for i in range(100):
            for batch_loc in train_manager_loc.iter_batch(shuffle=True):
                step_loc, batch_loss_loc = model.run_step_ner(sess, True, batch_loc)
                loss_loc.append(batch_loss_loc)
                if step_loc % FLAGS.steps_check == 0:
                    iteration_loc = step_loc // steps_per_epoch_loc + 1
                    logger.info("iteration:{} step_loc:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                        iteration_loc, step_loc % steps_per_epoch_loc, steps_per_epoch_loc, np.mean(loss_loc)))
                    loss_loc = []
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration_1 = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "SKILL loss:{:>9.6f}".format(
                        iteration_1, step % steps_per_epoch, steps_per_epoch, np.mean(loss)))
                    loss = []
            precision_loc_dev = model.precision(sess, dev_manager, id_to_tag)
            precision_loc_test = model.precision(sess, test_manager, id_to_tag)
            for batch_per in train_manager_per.iter_batch(shuffle=True):
                step_per, batch_loss_per = model.run_step_ner(sess, True, batch_per)
                loss_per.append(batch_loss_per)
                if step_per % FLAGS.steps_check == 0:
                    iteration_per = step_per // steps_per_epoch_per + 1
                    logger.info("iteration:{} step_per:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                        iteration_per, step_per % steps_per_epoch_per, steps_per_epoch_per, np.mean(loss_per)))
                    loss_per = []
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration_2 = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "SKILL loss:{:>9.6f}".format(
                        iteration_2, step % steps_per_epoch, steps_per_epoch, np.mean(loss)))
                    loss = []
            precision_per_dev = model.precision(sess, dev_manager, id_to_tag)
            precision_per_test = model.precision(sess, test_manager, id_to_tag)
            for batch_org in train_manager_org.iter_batch(shuffle=True):
                step_org, batch_loss_org = model.run_step_ner(sess, True, batch_org)
                loss_org.append(batch_loss_org)
                if step_org % FLAGS.steps_check == 0:
                    iteration_org = step_org // steps_per_epoch_org + 1
                    logger.info("iteration:{} step_org:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                        iteration_org, step_org % steps_per_epoch_org, steps_per_epoch_org, np.mean(loss_org)))
                    loss_org = []
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration_3 = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "SKILL loss:{:>9.6f}".format(
                        iteration_3, step % steps_per_epoch, steps_per_epoch, np.mean(loss)))
                    loss = []
            precision_org_dev = model.precision(sess, dev_manager, id_to_tag)
            precision_org_test = model.precision(sess, test_manager, id_to_tag)
            best = evaluate(sess, model, "dev", dev_manager, id_to_tag,precision_loc_dev,precision_per_dev,precision_org_dev, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
                best_test,results= evaluate(sess, model, "test", test_manager, id_to_tag,precision_loc_test,precision_per_test,precision_org_test, logger)
                with open("CDTL_PSE-result.csv", "a",encoding='utf-8')as st_re:
                    st_re.write(str(results).replace("[", "").replace("]", ""))
                    st_re.write("\n")
Example #10
            models['upscale'][scale].parameters(),
            'lr':
            LEARNING_RATE * 0.1
        })
    optimizer.add_param_group({
        'params': models['extra'].parameters(),
        'lr': LEARNING_RATE * 0.1
    })

    # set Meter to calculate the average of loss
    train_loss = meter.AverageMeter()

    # running
    for epoch in range(EPOCH_START, EPOCH_START + EPOCH):
        # select upscale factor
        scale = UPSCALE_FACTOR_LIST[epoch % len(UPSCALE_FACTOR_LIST)]
        datasets.scale_factor = scale
        # load data
        train_data_loader = torch.utils.data.DataLoader(dataset=train_data,
                                                        batch_size=BATCH_SIZE,
                                                        shuffle=True,
                                                        num_workers=4)
        for iteration in range(ITER_PER_EPOCH):
            train(models, scale, train_data_loader, criterion, optimizer,
                  train_loss, False)
        print('{:0>3d}: train_loss: {:.8f}, scale: {}'.format(
            epoch + 1, train_loss.avg, scale))
        utils.save_model(models, scale, SAVE_PATH,
                         epoch // len(UPSCALE_FACTOR_LIST) + 1)
        train_loss.reset()
Example #11
def train():
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower, FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences])
                )
            )
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences, FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(
        train_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    dev_data = prepare_dataset(
        dev_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    test_data = prepare_dataset(
        test_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    print("%i / %i / %i sentences in train / dev / test." % (
        len(train_data), 0, len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)
    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        logger.info("start training")
        loss = []

        for i in range(100):
            for batch in train_manager.iter_batch(shuffle=True):
                #print batch
                step, batch_loss = model.run_step(sess, True, batch)
                #print step
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                        iteration, step % steps_per_epoch, steps_per_epoch, np.mean(loss)))
                    loss = []

            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Example #12
def main():
    torch.set_printoptions(linewidth=320)
    args = get_args()
    print(args)

    # set device
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")


    sintel_train_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Sintel/left_filtered/rgb'
    sintel_test_dir = sintel_train_dir
    sintel_label_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Sintel/left_filtered_cont_GT'

    sintel_train_files = [img for img in os.listdir(sintel_train_dir) if 'alley_1' not in img]
    sintel_test_files  = [img for img in os.listdir(sintel_test_dir) if 'alley_1' in img]
    sintel_train_filelist = [os.path.join(sintel_train_dir, img) for img in sintel_train_files]
    sintel_test_filelist  = [os.path.join(sintel_test_dir, img) for img in sintel_test_files]

    sintel_train_label_filelist = [img.replace(sintel_train_dir, sintel_label_dir).replace('_1100_maskImg.png', '_GT.dpt') for img in sintel_train_filelist]
    sintel_test_label_filelist  = [img.replace(sintel_test_dir,  sintel_label_dir).replace('_1100_maskImg.png', '_GT.dpt') for img in sintel_test_filelist]

    just_sintel = False
    if just_sintel:
        train_filelist = sintel_train_filelist
        test_filelist = sintel_test_filelist
        train_label_filelist = sintel_train_label_filelist
        test_label_filelist = sintel_test_label_filelist
    else:
        tau_train_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Tau-agent/left_filtered/rgb'
        tau_test_dir = tau_train_dir
        tau_label_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Tau-agent/left_filtered_cont_GT'
        # Take one scene for testing (WuMunchu)
        tau_train_files = [img for img in os.listdir(tau_train_dir) if 'WuM' not in img]
        tau_test_files = [img for img in os.listdir(tau_train_dir) if 'WuM' in img]
        tau_train_filelist = [os.path.join(tau_train_dir, img) for img in tau_train_files]
        tau_test_filelist = [os.path.join(tau_test_dir, img) for img in tau_test_files]

        tau_train_label_filelist = [img.replace(tau_train_dir, tau_label_dir).replace('_1100_maskImg.png', '_GT.dpt') for img in tau_train_filelist]
        tau_test_label_filelist = [img.replace(tau_test_dir, tau_label_dir).replace('_1100_maskImg.png', '_GT.dpt') for img in tau_test_filelist]

        train_filelist = sintel_train_filelist + tau_train_filelist
        test_filelist  = sintel_test_filelist + tau_test_filelist
        train_label_filelist = sintel_train_label_filelist + tau_train_label_filelist
        test_label_filelist  = sintel_test_label_filelist + tau_test_label_filelist

    # train_filelist = open("filename.txt").readlines()

    # train_dir = '/home/yotamg/data/raw_rgb_pngs'
    # train_filelist = [os.path.join(train_dir,img) for img in os.listdir(train_dir)]
    # test_filelist = train_filelist
    # label_dir = '/media/yotamg/bd0eccc9-4cd5-414c-b764-c5a7890f9785/Yotam/Depth_sintel_and_tau_from_shay/'
    # train_label_filelist = [img.replace(train_dir, label_dir).replace('.png', '.dpt') for img in train_filelist]
    # test_label_filelist = train_label_filelist

    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    train_dataset = Dataset(image_filelist=train_filelist, label_filelist=train_label_filelist, train=True, pickle_name='train_cont_segmentation.pickle', transforms=transform, target_mode=args.target_mode)
    test_dataset  = Dataset(image_filelist=test_filelist,  label_filelist=test_label_filelist,  train=False, pickle_name='test_cont_segmentation.pickle', transforms=transform, target_mode=args.target_mode)

    train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=12, shuffle=True, num_workers=1)
    test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=12, shuffle=True, num_workers=1)

    net = Net(device=device, mode=args.target, target_mode=args.target_mode)
    model_name = 'DepthNet'

    if args.load_model:
        load_model(net, device, fullpath=args.load_path)

    if args.train:
        train_loss, val_loss = train(net=net, train_data_loader=train_data_loader, test_data_loader=test_data_loader, device=device, num_epochs=args.epochs)
        _plot_fig(train_loss, val_loss, model_name+'-Losses')
        save_model(net, epoch=args.epochs, experiment_name=args.experiment_name)
Example #13
def train_conv(args):
    # Creating convolution model here.
    ####################################

    print("Defining model")
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings,
                                args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT)
    print("Only Conv model trained")
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings,
                                 args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha,
                                 args.alpha_conv, args.nheads_GAT,
                                 args.out_channels)

    if CUDA:
        model_conv.cuda()
        model_gat.cuda()

    model_gat.load_state_dict(
        torch.load('{}/trained_{}.pth'.format(args.output_folder,
                                              args.epochs_gat - 1)))
    model_conv.final_entity_embeddings = model_gat.final_entity_embeddings
    model_conv.final_relation_embeddings = model_gat.final_relation_embeddings

    Corpus_.batch_size = args.batch_size_conv
    Corpus_.invalid_valid_ratio = int(args.valid_invalid_ratio_conv)

    optimizer = torch.optim.Adam(model_conv.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay_conv)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=25,
                                                gamma=0.5,
                                                last_epoch=-1)

    margin_loss = torch.nn.SoftMarginLoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs_conv))

    for epoch in range(args.epochs_conv):
        random.shuffle(Corpus_.train_triples)
        Corpus_.train_indices = np.array(list(Corpus_.train_triples)).astype(
            np.int32)

        model_conv.train()  # getting in training mode
        start_time = time.time()
        epoch_loss = []

        if len(Corpus_.train_indices) % args.batch_size_conv == 0:
            num_iters_per_epoch = len(
                Corpus_.train_indices) // args.batch_size_conv
        else:
            num_iters_per_epoch = (len(Corpus_.train_indices) //
                                   args.batch_size_conv) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_values = Corpus_.get_iteration_batch(iters)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_values = Variable(torch.FloatTensor(train_values)).cuda()

            else:
                train_indices = Variable(torch.LongTensor(train_indices))
                train_values = Variable(torch.FloatTensor(train_values))

            preds = model_conv(Corpus_, Corpus_.train_adj_matrix,
                               train_indices)

            optimizer.zero_grad()

            loss = margin_loss(preds.view(-1), train_values.view(-1))

            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            if iters % 500 == 0:
                print(
                    "Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}"
                    .format(iters, end_time_iter - start_time_iter,
                            loss.data.item()))

            summary.add_scalar('loss/conv_loss_iter', loss.data.item(),
                               iters + epoch * num_iters_per_epoch)

        scheduler.step()

        if epoch % 10 == 0:
            print("Epoch {} , average loss {} , epoch_time {}".format(
                epoch,
                sum(epoch_loss) / len(epoch_loss),
                time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))
        summary.add_scalar('loss/conv_loss_epoch',
                           sum(epoch_loss) / len(epoch_loss), epoch)

        if (epoch + 1) % 10 == 0:
            save_model(model_conv, args.data, epoch,
                       args.output_folder + "conv/")

    now = time.localtime()
    with open(
            (args.output_folder + "train_conv_epoch_losses_{}-{}-{}.txt").format(
                now.tm_year, now.tm_mon, now.tm_mday), 'w') as f:
        for i in epoch_losses:
            f.write(str(i))
            f.write('\n')
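# Self-contained sketch of the epoch loop / scheduler / periodic-checkpoint pattern
# used in train_conv above, reduced to a toy linear model on random data (the model,
# the data, and the checkpoint file name are placeholders, not parts of the original project).
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=25, gamma=0.5)
margin_loss = torch.nn.SoftMarginLoss()

x = torch.randn(64, 4)
y = torch.randint(0, 2, (64, 1)).float() * 2 - 1   # SoftMarginLoss expects +/-1 targets

for epoch in range(50):
    optimizer.zero_grad()
    loss = margin_loss(model(x), y)
    loss.backward()
    optimizer.step()
    scheduler.step()
    if (epoch + 1) % 10 == 0:
        # stand-in for save_model(model_conv, args.data, epoch, ...)
        torch.save(model.state_dict(), 'toy_conv_epoch{}.pth'.format(epoch + 1))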
Example #14
    def __call__(self, sess, epoch, iteration, model, loss, processed):
        if iteration == 0 and epoch % self.at_every_epoch == 0:
            print("Saving model...")
            save_model(self.saver, sess, self.path, model, self.config)
Example #15
    print("learning...")
    if params["batch_training"]:
        for e in range(params["n_epoch"]):
            print("epoch number: %d" % e)
            for X_batch, y_batch in batches:
                
                y_batch = np_utils.to_categorical(y_batch, n_classes)
                m.fit(X_batch, y_batch, batch_size=params["mini_batch_size"],
                      nb_epoch=1, verbose=1, validation_data=(X_test, y_test))
                # loss = m.train_on_batch(X_batch, y_batch)
                # print("loss: [%d]" % loss[0])

    else:
        # canister callback
        # callback = DBCallback("CharConvLM", "tokenized", params)
        
        X_train = batches.X
        y_train = np_utils.to_categorical(batches.y, n_classes)
        m.fit(X_train, y_train,
              validation_split=0.05,
              batch_size=params["batch_size"],
              nb_epoch=params["n_epoch"],
              show_accuracy=True,
              verbose=1# ,
              # callbacks=[callback]
        )

    print("saving model params...")
    fname = '/home/manjavacas/code/python/spelldict/models/model'
    save_model(m, batches.word_indexer, batches.char_indexer, fname)
def do_train(train_texts, train_labels, dev_texts, dev_labels, lstm_shape, lstm_settings,
    lstm_optimizer, batch_size=100,
    do_fit1=True, epochs1=5, model1_path=None, config1_path=None,
    do_fit2=False, epochs2=2, model2_path=None, config2_path=None,
    epoch_path=None, lstm_type=1):
    """Train a Keras model on the sentences in `train_texts`
        All the sentences in a text have the text's label
        do_fit1: Fit with frozen word embeddings
        do_fit2: Fit with unfrozen word embeddings (after fitting with frozen embeddings) at a lower
                learning rate
    """

    print('do_train: train_texts=%s dev_texts=%s' % (dim(train_texts), dim(dev_texts)))
    best_epoch_frozen, best_epoch_unfrozen = -1, -1

    n_train_sents = count_sentences(train_texts, batch_size, 'train')
    X_train, y_train = make_sentences(lstm_shape['max_length'], batch_size,
        train_texts, train_labels, 'train', n_train_sents)
    validation_data = None
    if dev_texts is not None:
        n_dev_sents = count_sentences(dev_texts, batch_size, 'dev')
        X_val, y_val = make_sentences(lstm_shape['max_length'], batch_size,
            dev_texts, dev_labels, 'dev', n_dev_sents)
        validation_data = (X_val, y_val)
    sentence_cache.flush()

    print("Loading spaCy")
    nlp = sentence_cache._load_nlp()
    embeddings = get_embeddings(nlp.vocab)
    model = build_lstm[lstm_type](embeddings, lstm_shape, lstm_settings)
    compile_lstm(model, lstm_settings['lr'])

    callback_list = None

    if do_fit1:
        if validation_data is not None:
            ra_val = RocAucEvaluation(validation_data=validation_data, interval=1, frozen=True,
                model_path=model1_path, config_path=config1_path)
            early = EarlyStopping(monitor='val_auc', mode='max', patience=2, verbose=1)
            callback_list = [ra_val, early]
        else:
            sae = SaveAllEpochs(model1_path, config1_path, epoch_path, True)
            if sae.last_epoch1() > 0:
                xprint('Reloading partially built model 1')
                get_embeds = partial(get_embeddings, vocab=nlp.vocab)
                model = load_model(model1_path, config1_path, True, get_embeds)
                compile_lstm(model, lstm_settings['lr'])
                epochs1 -= sae.last_epoch1()
            callback_list = [sae]

        if epochs1 > 0:
            model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs1,
                      validation_data=validation_data, callbacks=callback_list, verbose=1)
            if validation_data is not None:
                best_epoch_frozen = ra_val.best_epoch
                ra_val.best_epoch = -1
            else:
                save_model(model, model1_path, config1_path, True)

    if do_fit2:
        # Reload the best model so far, if it exists
        if os.path.exists(model1_path) and os.path.exists(config1_path):
            model = load_model(model1_path, config1_path, True,
                partial(get_embeddings, vocab=nlp.vocab))
        xprint("Unfreezing")
        for layer in model.layers:
            layer.trainable = True
        compile_lstm(model, lstm_settings['lr'] / 10)
        if validation_data is not None:
            # Reset early stopping
            ra_val = RocAucEvaluation(validation_data=validation_data, interval=1,
                frozen=False, was_frozen=True,
                get_embeddings=partial(get_embeddings, vocab=nlp.vocab),
                do_prime=True, model_path=model1_path, config_path=config1_path)
            early = EarlyStopping(monitor='val_auc', mode='max', patience=2, verbose=1)
            callback_list = [ra_val, early]
        else:
            sae = SaveAllEpochs(model2_path, config2_path, epoch_path, False)
            if sae.last_epoch2() > 0:
                xprint('Reloading partially built model 2')
                get_embeds = partial(get_embeddings, vocab=nlp.vocab)
                model = load_model(model2_path, config2_path, False)
                compile_lstm(model, lstm_settings['lr'])
                epochs2 -= sae.last_epoch2()
            callback_list = [sae]

        if epochs2 > 0:
            model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs2,
                      validation_data=validation_data, callbacks=callback_list, verbose=1)
            # ra_val only exists when validation data was supplied; guard the read
            if validation_data is not None:
                best_epoch_unfrozen = ra_val.best_epoch
            else:
                save_model(model, model2_path, config2_path, False)

    del nlp
    return model, (best_epoch_frozen, best_epoch_unfrozen)
					g_error = g_error.item()
			global_step += 1

			# Display Progress every few batches
			if global_step % 50 == 0:
				dis.add_scalar("epoch",epoch,global_step)
				dis.add_scalar("g_error",g_error,global_step)
				dis.add_scalar("d_error",d_error.item(),global_step)
				dis.add_scalar("beta",temperature.item(),global_step)
		if epoch % 20 == 0:
			test_samples = generator(z=test_noise,num_steps=num_steps,temperature=temperature)
			test_samples_vals = torch.argmax(test_samples,dim=2)
			test_samples_text = tensor_to_words(test_samples_vals,num_to_word_vocab)
			text_log.write("Epoch: "+str(epoch)+"\n"+test_samples_text+"\n")
		if epoch % 10 == 0:
			save_model(generator,summary_path)
			save_model(discriminator,summary_path)
		epoch += 1
except:
	save_model(generator,summary_path)
	save_model(discriminator,summary_path)

test_samples = generator(z=test_noise,num_steps=num_steps,temperature=temperature)
test_samples_vals = torch.argmax(test_samples,dim=2)
test_samples_text = tensor_to_words(test_samples_vals,num_to_word_vocab)
text_log.write("After training:\n"+test_samples_text+"\n")

def nll_gen(real_data,fake_data):
	'''
	Evaluate the generator's ability to generate diverse samples
	'''    
Example #18
def train(M, src=None, trg=None, has_disc=True, saver=None, model_name=None):
    """Main training function

    Creates log file, manages datasets, trains model

    M          - (TensorDict) the model
    src        - (obj) source domain. Contains train/test Data obj
    trg        - (obj) target domain. Contains train/test Data obj
    has_disc   - (bool) whether model requires a discriminator update
    saver      - (Saver) saves models during training
    model_name - (str) name of the model being run with relevant parms info
    """
    # Training settings
    bs = 64
    iterep = 1000
    itersave = 20000
    n_epoch = 80
    epoch = 0
    feed_dict = {}

    # Create a log directory and FileWriter
    log_dir = os.path.join(args.logdir, model_name)
    delete_existing(log_dir)
    train_writer = tf.summary.FileWriter(log_dir)

    # Create a save directory
    if saver:
        model_dir = os.path.join('checkpoints', model_name)
        delete_existing(model_dir)
        os.makedirs(model_dir)

    # Replace src domain with pseudolabeled trg
    if args.dirt > 0:
        print("Setting backup and updating backup model")
        src = PseudoData(args.trg, trg, M.teacher)
        M.sess.run(M.update_teacher)

        # Sanity check model
        print_list = []
        if src:
            save_value(M.fn_ema_acc, 'test/src_test_ema_1k',
                     src.test,  train_writer, 0, print_list, full=False)

        if trg:
            save_value(M.fn_ema_acc, 'test/trg_test_ema',
                     trg.test,  train_writer, 0, print_list)
            save_value(M.fn_ema_acc, 'test/trg_train_ema_1k',
                     trg.train, train_writer, 0, print_list, full=False)

        print(print_list)

    if src: get_info(args.src, src)
    if trg: get_info(args.trg, trg)
    print "Batch size:", bs
    print "Iterep:", iterep
    print "Total iterations:", n_epoch * iterep
    print "Log directory:", log_dir

    for i in range(n_epoch * iterep):
        # Run discriminator optimizer
        if has_disc:
            update_dict(M, feed_dict, src, trg, bs)
            summary, _ = M.sess.run(M.ops_disc, feed_dict)
            train_writer.add_summary(summary, i + 1)

        # Run main optimizer
        update_dict(M, feed_dict, src, trg, bs)
        summary, _ = M.sess.run(M.ops_main, feed_dict)
        train_writer.add_summary(summary, i + 1)
        train_writer.flush()

        end_epoch, epoch = tb.utils.progbar(i, iterep,
                                            message='{}/{}'.format(epoch, i),
                                            display=args.run >= 999)

        # Update pseudolabeler
        if args.dirt and (i + 1) % args.dirt == 0:
            print "Updating teacher model"
            M.sess.run(M.update_teacher)

        # Log end-of-epoch values
        if end_epoch:
            print_list = M.sess.run(M.ops_print, feed_dict)

            if src:
                save_value(M.fn_ema_acc, 'test/src_test_ema_1k',
                         src.test,  train_writer, i + 1, print_list, full=False)

            if trg:
                save_value(M.fn_ema_acc, 'test/trg_test_ema',
                         trg.test,  train_writer, i + 1, print_list)
                save_value(M.fn_ema_acc, 'test/trg_train_ema_1k',
                         trg.train, train_writer, i + 1, print_list, full=False)

            print_list += ['epoch', epoch]
            print(print_list)

        if saver and (i + 1) % itersave == 0:
            save_model(saver, M, model_dir, i + 1)

    # Saving final model
    if saver:
        save_model(saver, M, model_dir, i + 1)
Example #19
def main(config):
    result_name = '{}_{}_{}way_{}shot'.format(
        config['data_name'],
        config['arch']['base_model'],
        config['general']['way_num'],
        config['general']['shot_num'],
    )
    save_path = os.path.join(config['general']['save_root'], result_name)
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    fout_path = os.path.join(save_path, 'train_info.txt')
    fout_file = open(fout_path, 'a+')
    with open(os.path.join(save_path, 'config.json'), 'w') as handle:
        json.dump(config, handle, indent=4, sort_keys=True)
    print_func(config, fout_file)

    train_trsfms = transforms.Compose([
        transforms.Resize((config['general']['image_size'],
                           config['general']['image_size'])),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])

    val_trsfms = transforms.Compose([
        transforms.Resize((config['general']['image_size'],
                           config['general']['image_size'])),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])

    model = ALTNet(**config['arch'])
    print_func(model, fout_file)

    optimizer = optim.Adam(model.parameters(), lr=config['train']['optim_lr'])

    if config['train']['lr_scheduler']['name'] == 'StepLR':
        lr_scheduler = optim.lr_scheduler.StepLR(
            optimizer=optimizer, **config['train']['lr_scheduler']['args'])
    elif config['train']['lr_scheduler']['name'] == 'MultiStepLR':
        lr_scheduler = optim.lr_scheduler.MultiStepLR(
            optimizer=optimizer, **config['train']['lr_scheduler']['args'])
    else:
        raise RuntimeError

    if config['train']['loss']['name'] == 'CrossEntropyLoss':
        criterion = nn.CrossEntropyLoss(**config['train']['loss']['args'])
    else:
        raise RuntimeError

    device, _ = prepare_device(config['n_gpu'])
    model = model.to(device)
    criterion = criterion.to(device)

    best_val_prec1 = 0
    best_test_prec1 = 0
    for epoch_index in range(config['train']['epochs']):
        print_func('{} Epoch {} {}'.format('=' * 35, epoch_index, '=' * 35),
                   fout_file)
        train_dataset = ImageFolder(
            data_root=config['general']['data_root'],
            mode='train',
            episode_num=config['train']['episode_num'],
            way_num=config['general']['way_num'],
            shot_num=config['general']['shot_num'],
            query_num=config['general']['query_num'],
            transform=train_trsfms,
        )
        val_dataset = ImageFolder(
            data_root=config['general']['data_root'],
            mode='val',
            episode_num=config['test']['episode_num'],
            way_num=config['general']['way_num'],
            shot_num=config['general']['shot_num'],
            query_num=config['general']['query_num'],
            transform=val_trsfms,
        )
        test_dataset = ImageFolder(
            data_root=config['general']['data_root'],
            mode='test',
            episode_num=config['test']['episode_num'],
            way_num=config['general']['way_num'],
            shot_num=config['general']['shot_num'],
            query_num=config['general']['query_num'],
            transform=val_trsfms,
        )

        print_func(
            'The num of the train_dataset: {}'.format(len(train_dataset)),
            fout_file)
        print_func('The num of the val_dataset: {}'.format(len(val_dataset)),
                   fout_file)
        print_func('The num of the test_dataset: {}'.format(len(test_dataset)),
                   fout_file)

        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=config['train']['batch_size'],
            shuffle=True,
            num_workers=config['general']['workers_num'],
            drop_last=True,
            pin_memory=True)
        val_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=config['test']['batch_size'],
            shuffle=True,
            num_workers=config['general']['workers_num'],
            drop_last=True,
            pin_memory=True)
        test_loader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=config['test']['batch_size'],
            shuffle=True,
            num_workers=config['general']['workers_num'],
            drop_last=True,
            pin_memory=True)

        # train for 5000 episodes in each epoch
        print_func('============ Train on the train set ============',
                   fout_file)
        train(train_loader, model, criterion, optimizer, epoch_index, device,
              fout_file, config['general']['image2level'],
              config['general']['print_freq'])

        print_func('============ Validation on the val set ============',
                   fout_file)
        val_prec1 = validate(val_loader, model, criterion, epoch_index, device,
                             fout_file, config['general']['image2level'],
                             config['general']['print_freq'])
        print_func(
            ' * Prec@1 {:.3f} Best Prec1 {:.3f}'.format(
                val_prec1, best_val_prec1), fout_file)

        print_func('============ Testing on the test set ============',
                   fout_file)
        test_prec1 = validate(test_loader, model, criterion, epoch_index,
                              device, fout_file,
                              config['general']['image2level'],
                              config['general']['print_freq'])
        print_func(
            ' * Prec@1 {:.3f} Best Prec1 {:.3f}'.format(
                test_prec1, best_test_prec1), fout_file)

        if val_prec1 > best_val_prec1:
            best_val_prec1 = val_prec1
            best_test_prec1 = test_prec1
            save_model(model,
                       save_path,
                       config['data_name'],
                       epoch_index,
                       is_best=True)

        if epoch_index % config['general']['save_freq'] == 0 and epoch_index != 0:
            save_model(model,
                       save_path,
                       config['data_name'],
                       epoch_index,
                       is_best=False)

        lr_scheduler.step()

    print_func('............Training is end............', fout_file)
Example #20
        train_loss, train_acc = train(net, train_loader, criterion, optimizer,
                                      args.v)
        val_loss, val_acc, fscore = validate(net, val_loader, criterion)
        end = time.time()

        # print stats
        stats = """Epoch: {}\t train loss: {:.3f}, train acc: {:.3f}\t
                val loss: {:.3f}, val acc: {:.3f}\t fscore: {:.3f}\t
                time: {:.1f}s""".format(e, train_loss, train_acc, val_loss,
                                        val_acc, fscore, end - start)
        print(stats)
        print(stats, file=logfile)
        log_value('train_loss', train_loss, e)
        log_value('val_loss', val_loss, e)
        log_value('fscore', fscore, e)

        #early stopping and save best model
        if val_loss < best_loss:
            best_loss = val_loss
            patience = args.patience
            utils.save_model(
                {
                    'arch': args.model,
                    'state_dict': net.state_dict()
                }, 'saved-models/{}-run-{}.pth.tar'.format(args.model, run))
        else:
            patience -= 1
            if patience == 0:
                print('Ran out of patience!')
                break
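
For reference, the utils.save_model helper called above is not shown in this snippet; a minimal sketch, assuming it is just a thin wrapper around torch.save for a checkpoint dict, could look like this:

import torch

def save_model(state, path):
    # state is a checkpoint dict such as {'arch': ..., 'state_dict': ...}
    torch.save(state, path)
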
Example #21
def main_worker(gpu, args):
    args.gpu = gpu

    if args.multiGPUs and args.gpu != 0:

        def print_pass(*args):
            pass

        builtins.print = print_pass

    if args.gpu is not None:
        print('Use GPU: {} for training'.format(args.gpu))

    if args.multiGPUs:
        args.rank = gpu
        setup(args.rank, args.world_size)

        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
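            # divide the data-loading workers across the world_size processes
            # (ceiling division), so each process gets its share of workers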
            args.workers = int(
                (args.workers + args.world_size - 1) / args.world_size)

    # prepare data
    if args.task == 'pvqa':
        dict_path = 'data/pvqa/pvqa_dictionary.pkl'
        dictionary = Dictionary.load_from_file(dict_path)
        train_dset = PVQAFeatureDataset(args.train, dictionary, adaptive=False)
        val_dset = PVQAFeatureDataset(args.val, dictionary, adaptive=False)
        w_emb_path = 'data/pvqa/glove_pvqa_300d.npy'
    else:
        raise Exception('%s not implemented yet' % args.task)

    if args.multiGPUs:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dset)
    else:
        train_sampler = None

    if args.task == 'pvqa':
        train_loader = DataLoader(train_dset,
                                  args.batch_size,
                                  shuffle=False,
                                  num_workers=args.workers,
                                  pin_memory=True,
                                  sampler=train_sampler)
        eval_loader = DataLoader(val_dset,
                                 args.batch_size,
                                 shuffle=False,
                                 num_workers=args.workers,
                                 pin_memory=True)

    # prepare model

    model = BanModel(ntoken=train_dset.dictionary.ntoken,
                     num_ans_candidates=train_dset.num_ans_candidates,
                     num_hid=args.num_hid,
                     v_dim=train_dset.v_dim,
                     op=args.op,
                     gamma=args.gamma)

    tfidf = None
    weights = None
    model.w_emb.init_embedding(w_emb_path, tfidf, weights)

    if args.multiGPUs:
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            args.workers = int(
                (args.workers + args.world_size - 1) / args.world_size)
            model = DDP(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = DDP(model)
    else:
        torch.cuda.set_device(args.gpu)
        model.cuda(args.gpu)

    # load snapshot
    if args.input is not None:
        print('#8')
        print('loading %s' % args.input)
        if args.gpu is None:
            model_data = torch.load(args.input)
        else:
            loc = 'cuda:{}'.format(args.gpu)
            model_data = torch.load(args.input, map_location=loc)
        model_data_sd = model_data.get('model_state', model_data)

        for name, param in model.named_parameters():
            if name in model_data_sd:
                param.data = model_data_sd[name]

        # optimizer = torch.optim.Adamax(filter(lambda p: p.requires_grad, model.parameters()))
        # optimizer.load_state_dict(model_data.get('optimizer_state', model_data))
        args.start_epoch = model_data['epoch'] + 1

    optimizer = torch.optim.Adamax(
        filter(lambda p: p.requires_grad, model.parameters()))
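    # the filter above ensures only parameters with requires_grad=True are optimised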

    best_eval_score = 0
    for epoch in range(args.start_epoch, args.epochs):
        if args.multiGPUs:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, eval_loader, train_dset, model, optimizer, epoch,
              args)

        eval_score = evaluate(eval_loader, model, args)

        with open(os.path.join(args.output, 'log.log'), 'a') as f:
            f.write(str(datetime.datetime.now()))
            f.write('epoch=%d' % epoch)
            f.write('eval_score=%.4f' % eval_score)

        print('eval_score=', eval_score)
        print('best eval_score = ', best_eval_score)

        if not args.multiGPUs or (args.multiGPUs and args.gpu == 0):
            if eval_score > best_eval_score:
                model_path = os.path.join(args.output, 'model_best.pth')
                utils.save_model(model_path, model, epoch, optimizer)
                best_eval_score = eval_score
Example #22
            question, answer_target = question.to(device), answer_target.to(
                device)
            optimizer.zero_grad()
            output = net(question)
            loss = criterion(output, answer_target)
            loss.backward()
            #nn.utils.clip_grad_norm(net.parameters(),0.25)
            optimizer.step()
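            # take the argmax over the answer dimension as the predicted answer index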
            pred = output.data.max(1)[1]
            correct = (pred == answer_target).data.cpu().sum()

            acc += correct.item()
            number_dataset += len(answer_target)
            total_loss += loss

        total_loss /= len(train_data)
        acc = acc / number_dataset * 100.

        logger.info('-------[Epoch]:{}-------'.format(epoch))
        logger.info('[Train] Loss:{:.6f} , Train_Acc:{:.6f}%'.format(
            total_loss, acc))
        # Evaluation
        if val_data is not None:
            eval_score = evaluate(net, val_data, logger, device)
            if eval_score > best_eval_score:
                best_eval_score = eval_score
                best_epoch = epoch
                utils.save_model(ckpt_path, net, epoch)
            logger.info('[Result] The best acc is {:.6f}% at epoch {}'.format(
                best_eval_score, best_epoch))
Example #23
def main():
    print(device_lib.list_local_devices())
    args = parse_args()
    args.verbose = True

    try:
        # get paths to midi files in --data_dir
        midi_files = [os.path.join(args.data_dir, path) \
                      for path in os.listdir(args.data_dir) \
                      if '.mid' in path or '.midi' in path]
    except OSError as e:
        utils.log(
            'Error: Invalid --data_dir, {} directory does not exist. '
            'Exiting.'.format(args.data_dir), args.verbose)
        exit(1)

    utils.log(
        'Found {} midi files in {}'.format(len(midi_files), args.data_dir),
        args.verbose)

    if len(midi_files) < 1:
        utils.log(
            'Error: no midi files found in {}. Exiting.'.format(args.data_dir),
            args.verbose)
        exit(1)

    # create the experiment directory and return its name
    experiment_dir = utils.create_experiment_dir(args.experiment_dir,
                                                 args.verbose)

    # write --message to experiment_dir
    if args.message:
        with open(os.path.join(experiment_dir, 'message.txt'), 'w') as f:
            f.write(args.message)
            utils.log(
                'Wrote {} bytes to {}'.format(
                    len(args.message),
                    os.path.join(experiment_dir, 'message.txt')), args.verbose)

    val_split = 0.2  # use 20 percent for validation
    val_split_index = int(float(len(midi_files)) * val_split)

    # use generators to lazy load train/validation data, ensuring that the
    # user doesn't have to load all midi files into RAM at once
    train_generator = utils.get_data_generator(
        midi_files[0:val_split_index],
        window_size=args.window_size,
        batch_size=args.batch_size,
        num_threads=args.n_jobs,
        max_files_in_ram=args.max_files_in_ram)

    val_generator = utils.get_data_generator(
        midi_files[val_split_index:],
        window_size=args.window_size,
        batch_size=args.batch_size,
        num_threads=args.n_jobs,
        max_files_in_ram=args.max_files_in_ram)

    model, epoch = get_model(args)
    if args.verbose:
        print(model.summary())

    utils.save_model(model, experiment_dir)
    utils.log(
        'Saved model to {}'.format(os.path.join(experiment_dir, 'model.json')),
        args.verbose)

    callbacks = get_callbacks(experiment_dir)

    print('fitting model...')
    # this is a somewhat magic number which is the average number of length-20 windows
    # calculated from ~5K MIDI files from the Lakh MIDI Dataset.
    magic_number = 827
    start_time = time.time()
    model.fit_generator(train_generator,
                        steps_per_epoch=len(midi_files) * magic_number /
                        args.batch_size,
                        epochs=args.num_epochs,
                        validation_data=val_generator,
                        validation_steps=len(midi_files) * 0.2 * magic_number /
                        args.batch_size,
                        verbose=1,
                        callbacks=callbacks,
                        initial_epoch=epoch)
    utils.log('Finished in {:.2f} seconds'.format(time.time() - start_time),
              args.verbose)
def test_mlp(learning_rate=0.01, L2_reg=0.00000001, n_epochs=200,
             dataset='theano.join.data', ref_dataset='ref.theano.join.data',
             batch_size=10000, max_iter=5000, output='theano.model.out',
             validation_freq=100, ada_epsilon=0.000001, L2_reg_tie=0.001,
             map_file="labels.mapping"):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path to the theano.classifier.data


   """
    print (" Learning with params : ")
    print (" Learning rate : " + str(learn_rate)); 
    print (" Regularlization params : " + str(L2_reg))
    print (" Regularlization of tieing together : " + str(L2_reg_tie))
    print (" Batch size : "  + str(batch_size))
    print (" Max Iter : " + str(max_iter))
    print (" Epochs : " + str(n_epochs))
    print (" Evaluation frequency  : " + str(validation_freq))
    
    print ('... loading data ')
    
    ##### LOAD DATASET ORIGINAL and REF ##############
    print (' ----> load the mapping matrix ')
    mapping_matrix = load_mapping_matrix(map_file)
    
    print (' ----> load the original data ')
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    E = datasets[2]
    W1 = datasets[3]
    B1 = datasets[4]
    W2 = datasets[5]
    print (' ----> load the ref data ')
    ref_datasets = load_data(ref_dataset)
    ref_train_set_x, ref_train_set_y = ref_datasets[0]
    ref_valid_set_x, ref_valid_set_y = ref_datasets[1]
    refE = ref_datasets[2]
    refW1 = ref_datasets[3]
    refB1 = ref_datasets[4]
    refW2 = ref_datasets[5]

    # compute number of minibatches for training, validation and testing
    
    n_train_batches = train_set_x.owner.inputs[0].get_value(borrow=True).shape[0] / batch_size
    n_ref_train_batches = ref_train_set_x.owner.inputs[0].get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.owner.inputs[0].get_value(borrow=True).shape[0] / batch_size
    
    if train_set_x.owner.inputs[0].get_value(borrow=True).shape[0]  % batch_size > 100: n_train_batches +=1
    if valid_set_x.owner.inputs[0].get_value(borrow=True).shape[0] % batch_size > 100 : n_valid_batches +=1 
    
    print ('Training batches : ' + str(n_train_batches))
    print ('Ref training batches : ' + str(n_ref_train_batches))
    print ('Valid batches : ' + str(n_valid_batches))
    
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print ('... building the model')

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    ref_index = T.lscalar() # Reference index to the source minibatch 
    
    x = T.imatrix('x')  # the data is presented as rasterized images
    xref = T.imatrix('xref')  # the data is presented as rasterized images
    yref = T.ivector('yref')  # the labels are presented as 1D vector of
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels
    rng = numpy.random.RandomState(1234)

    ###### DROP OUT RATE #############
    
    dropout_rate_hidden = 0.5
    dropout_rate_visible = 0.2
    #############################
        
    # construct the MLP class
    classifier = MLP(rng,
        input=x,
        refInput=xref,
        E=E, 
        W1=W1,
        B1=B1,
        W2 = W2,
        refE = refE,
        refW1 = refW1, 
        refB1 = refB1, 
        refW2 = refW2, 
        mapping = mapping_matrix,
        drop_out_rate=dropout_rate_hidden,
        drop_out_embedding_rate=dropout_rate_visible
    )

    train_errors = (classifier.errors(y))    
    cost = (
        classifier.negative_log_likelihood(y)
        + classifier.refNegative_log_likelihood(yref)
        + L2_reg * classifier.L2_sqr
        + L2_reg_tie * classifier.reg_L2_sqr
    )
    

    # end-snippet-4

    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch (remember index should always to even) 
    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],   # x,y here is symbolic variable 
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
            #xref: numpy.zeros(batch_size), 
            #yref: numpy.zeros(batch_size)
        }
    )

    # compute the gradient of cost with respect to theta 
    gparams = [T.grad(cost, param) for param in classifier.params]
    # Put the adagrad here 

    #learning_rate = T.fscalar('lr')  # learning rate to use
    updates = OrderedDict()
    for accugrad, param, gparam in zip(classifier._accugrads, classifier.params, gparams):
            agrad = accugrad + gparam * gparam
            dx = - (learning_rate / T.sqrt(agrad + ada_epsilon)) * gparam
            updates[param] = param + dx
            updates[accugrad] = agrad
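    # The loop above is a plain Adagrad update: each parameter accumulates the
    # sum of its squared gradients, and its step is scaled by
    # learning_rate / sqrt(accumulated + ada_epsilon).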

    # compiling a Theano function `train_model` that returns the cost, but
    # in the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function(
        inputs=[index,ref_index],
        outputs=(cost, train_errors),
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],   # x,y here is symbolic variable 
            y: train_set_y[index * batch_size: (index + 1) * batch_size],
            xref: ref_train_set_x[ref_index * batch_size: (ref_index + 1) * batch_size],
            yref: ref_train_set_y[ref_index * batch_size: (ref_index + 1) * batch_size]
        }
    )
    # end-snippet-5

    ###############
    # TRAIN MODEL #
    ###############
    print ('... training ')

    # early-stopping parameters
    patience = 2000  # Long Duong: must run through at least this many iterations before stopping
    patience_increase = 2  # wait this much longer when a new best is found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    #validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatche before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch
    validation_frequency = validation_freq
    
    ######## FOR TESTING ONLY ##################
    #validation_frequency = 5 
    #n_train_batches = 10 
    #n_epochs = 1 
    ######################################
    
    
    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False
    ref_batch_idx = 0
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches): 
            (minibatch_avg_cost, minibatch_avg_error) = train_model(minibatch_index, ref_batch_idx)
            ref_batch_idx += 1
            if ref_batch_idx >= n_ref_train_batches:
                    ref_batch_idx = 0 
                
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index
            print (' Iteration :  ' + str(iter) + ' with Cost  (join) = ' + str(minibatch_avg_cost) + '  with errors (target only) = ' + str(minibatch_avg_error))
            # Long Duong: iter is the number of parameter updates so far
            # (i.e. the number of minibatches processed across epochs)

            if (iter + 1) % validation_frequency == 0:
                # Note that because we 
                validation_losses = [validate_model( i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print(
                    'epoch %i, minibatch %i/%i, validation error %f %%' %
                    (
                        epoch,
                        minibatch_index ,
                        n_train_batches,
                        this_validation_loss * 100.
                    )
                )

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    #improve patience if loss improvement is good enough
                    if (this_validation_loss < best_validation_loss * improvement_threshold):
                        # Long Duong: it took this many updates (iter) to reach this result,
                        # so wait proportionally longer before giving up on further improvement
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter
                    # Save the model; the weight matrices are rescaled by (1 - dropout rate)
                    # so the saved network can be used without dropout at test time
                    save_model(output,classifier.embeddingLayer.E.get_value(), 
                                       (classifier.dropout_HiddenLayer.W.get_value() * (1-dropout_rate_visible )).T, 
                                       classifier.dropout_HiddenLayer.b.get_value(), 
                                       (classifier.dropout_LogRegressionLayer.W.get_value() * (1- dropout_rate_hidden)).T)
                    
            # Long Duong : add max_iter criterion 
            if (patience <= iter) or (iter > max_iter) :
                done_looping = True
                break
            
    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i') %
          (best_validation_loss * 100., best_iter + 1))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
Example #25
import linear_model as lm
import utils

train_path = os.path.join(os.path.dirname(__file__), "./data/train.csv")
test_path = os.path.join(os.path.dirname(__file__), "./data/test.csv")
output_path = os.path.join(os.path.dirname(__file__), "./ans.csv")
model_path = os.path.join(os.path.dirname(__file__), "./model")
scaler_path = os.path.join(os.path.dirname(__file__), "./scaler")
 
fea_select, y_pos = (0, 4, 5, 6, 7, 8, 9, 16), 70


x, y = utils.load(train_path, mode='train', fea_select=fea_select, y_pos=y_pos)  # read all the data, extract the selected features, and split it into 9-day samples
x, max, min = utils.rescaling(x)  # rescale to the [0, 1] range
x, y = utils.shuffle(x, y)

x_train, y_train, x_val, y_val = utils.validation(x, y, ratio = 0.1)
b, w = lm.LinearRegression(x, y, lr = 100000, epoch = 1000000, lr_method = 'adagrad', x_val = x_val, y_val = y_val)


x_test = utils.load(test_path, mode = 'test', fea_select = fea_select, y_pos = y_pos) 
x_test = utils.scaling(x_test, max, min) 

predicted = lm.predict(x_test, b, w)
print('>>> Predicted Result :\n', predicted)

utils.save_scaler(max, min, scaler_path)
utils.save_model(b, w, model_path)
utils.save_ans(predicted, output_path)

Example #26
############
# Train the model
############
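# The assignment below is the well-known workaround for an old Keras/TensorFlow
# version mismatch in which Keras still referenced tf.python.control_flow_ops.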
tf.python.control_flow_ops = tf
fileJson = 'model_nvidia7' + '.json'
fileH5 = 'model_nvidia7' + '.h5'
number_of_epochs = 20
number_of_samples_per_epoch = train_batch_size * 150
number_of_validation_samples = validation_batch_size * 100
learning_rate = 1e-4
model = utils.get_model()
model.compile(
    optimizer=Adam(learning_rate),
    loss="mse",
)
try:  #try to load the weights if previously saved
    model.load_weights(fileH5)
    print('Resume training from previously saved weights.')
except:
    print('Training from scratch.')
    pass
history = model.fit_generator(train_generator,
                              samples_per_epoch=number_of_samples_per_epoch,
                              nb_epoch=number_of_epochs,
                              validation_data=valid_generator,
                              nb_val_samples=number_of_validation_samples,
                              verbose=1)

utils.save_model(fileJson, fileH5)
Example #27
def train():
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower,
                                     FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)

    # create maps if not exist, load data if exists maps
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for word
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(), FLAGS.emb_file,
                list(
                    itertools.chain.from_iterable([[w[0] for w in s]
                                                   for s in test_sentences])))
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences,
                                                      FLAGS.lower)

        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                 FLAGS.lower)
    dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                               FLAGS.lower)
    test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                FLAGS.lower)
    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), 0, len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)

    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # set up the training log directory
    train_log = os.path.join(FLAGS.logdir, "train")
    if not os.path.exists(train_log):
        os.makedirs(train_log)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data  # the nums of batch data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        # write out the constructed computation graph for inspection
        train_writer = tf.summary.FileWriter(train_log, sess.graph)
        logger.info("start training")
        loss = []
        dev_f1 = []
        test_f1 = []
        for i in range(100):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss, merged = model.run_step(
                    sess, True, batch)  # step is the global step
                # write the summary for this training step
                train_writer.add_summary(merged, step)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    loss = []

            # use dev data to validation the model
            best, dev_f1_value = evaluate(sess, model, "dev", dev_manager,
                                          id_to_tag, logger)
            # store the dev f1
            dev_f1.append(dev_f1_value)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
            # use current the  model to test
            _, test_f1_value = evaluate(sess, model, "test", test_manager,
                                        id_to_tag, logger)
            #   store the test f1
            test_f1.append(test_f1_value)
        # write the dev_f1 and test_f1 to file
        f1_result = {}
        f1_result["dev_f1"] = dev_f1
        f1_result["test_f1"] = test_f1
        write_data_to_file(f1_result, "f1_result")
def train():
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower,
                                     FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES)
    #update_tag_scheme(train_sentences, FLAGS.tag_schema)
    #update_tag_scheme(test_sentences, FLAGS.tag_schema)

    # create maps if not exist
    if not os.path.isfile(FLAGS.map_file):
        # Create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            tag_to_id, id_to_tag = pickle.load(f)

    # prepare data, get a collection of list containing index
    train_data = prepare_dataset(train_sentences, FLAGS.max_seq_len, tag_to_id,
                                 FLAGS.lower)
    dev_data = prepare_dataset(dev_sentences, FLAGS.max_seq_len, tag_to_id,
                               FLAGS.lower)
    test_data = prepare_dataset(test_sentences, FLAGS.max_seq_len, tag_to_id,
                                FLAGS.lower)
    print("%i / %i / %i sentences in train / dev / test." %
          (len(train_data), 0, len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, FLAGS.batch_size)
    test_manager = BatchManager(test_data, FLAGS.batch_size)
    # make path for store log and model if not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(tag_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)

    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, config, logger)

        logger.info("start training")
        loss = []
        for i in range(100):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)

                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    loss = []

            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess,
                           model,
                           FLAGS.ckpt_path,
                           logger,
                           global_steps=step)
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)
Example #29
File: main.py  Project: yanghang111/DCFEE
def train():
    train_sentences, dico, char_to_id, id_to_char = load_sentence(
        FLAGS.train_file)
    if not os.path.isfile(FLAGS.map_file):
        if FLAGS.pre_emb:
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico.copy(),
                FLAGS.emb_file,
            )
        else:
            sentences, dico, char_to_id, id_to_char = load_sentence(
                FLAGS.train_file)
        print(train_sentences[0])
        with open(FLAGS.map_file, 'wb') as f:
            pickle.dump([char_to_id, id_to_char], f)
    else:
        with open(FLAGS.map_file, 'rb') as f:
            char_to_id, id_to_char = pickle.load(f)

    train_data, test_data, dev_data = prepare_dataset(train_sentences,
                                                      char_to_id)
    print(train_data[0])
    print(test_data[0])
    print(dev_data[0])
    print(len(train_data), len(dev_data), len(test_data))
    train_manager = BatchManager(train_data, FLAGS.batch_size)
    test_manager = BatchManager(test_data, 100)
    dev_manager = BatchManager(dev_data, 100)

    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id)
        save_config(config, FLAGS.config_file)
    make_path(FLAGS)
    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    tf_config = tf.ConfigProto(gpu_options=gpu_options)
    tf_config.gpu_options.allow_growth = True
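    # the options above cap each process at 90% of GPU memory and let
    # TensorFlow grow the allocation lazily instead of grabbing it all upfront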

    steps_per_epoch = train_manager.len_data

    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec,
                             config, id_to_char, logger)
        logger.info("start training")
        loss = []
        best = 0
        # sess.graph.finalize()
        for i in range(50):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{},".format(
                        iteration, step % steps_per_epoch, steps_per_epoch))
                    loss = []
            Acc_result = evaluate(sess, model, "dev", dev_manager, logger)
            logger.info("Acc{}".format(Acc_result))
            logger.info("test")
            # precision, recall, f1_score = model.evaluete_(sess,test_manager)
            # logger.info("P, R, F,{},{},{}".format(precision, recall, f1_score))
            test_result = evaluate(sess, model, "test", test_manager, logger)
            if test_result > best:
                best = test_result
                save_model(sess, model, FLAGS.ckpt_path, logger)
        X_hold = X_hold.squeeze(1)  #batch_size,N_hold,3
        #X_eval=X_eval.squeeze(1) #batch_size,N_eval,3

        #extract set representation
        out, _, _ = dict_model['PointNet'](X_hold.permute(
            0, 2, 1))  #out: batch, 1024
        set_rep = dict_model['FeatureCompression'](
            out)  #set_rep: batch, dim_rep

        X_rec = dict_model['DecoderAE'](set_rep).reshape(P, N_hold, 3)

        dist_1, dist_2 = chamfer_dist(X_hold, X_rec)
        loss = (torch.mean(dist_1)) + (torch.mean(dist_2))
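        # symmetric Chamfer loss: mean nearest-neighbour distance in both
        # directions between the input set X_hold and its reconstruction X_rec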

        loss.backward()
        #ut.clip_grad(lst_model,5)
        optimizer.step()

        pbar.set_postfix(loss='{:.2e}'.format(loss.item()))
        pbar.update(1)

############################################
# save model and log result
############################################
print('*' * 20 + 'Saving models and logging results' + '*' * 20)
for model_name, model in dict_model.items():
    path = dir_save_model + args.name + '_' + model_name + '.pkl'
    ut.save_model(model, path)

lst_result = [args.ind, args.name, '{:.2e}'.format(loss.item())]
ut.write_list_to_csv(lst_result, dir_log)
Example #31
def main(args):
    paddle.seed(12345)
    config = load_yaml(args.config_yaml)
    use_gpu = config.get("dygraph.use_gpu", False)
    train_data_dir = config.get("dygraph.train_data_dir", None)
    epochs = config.get("dygraph.epochs", None)
    print_interval = config.get("dygraph.print_interval", None)
    model_save_path = config.get("dygraph.model_save_path", "model_output")
    batch_size = config.get('dygraph.batch_size_train', None)
    margin = config.get('hyper_parameters.margin', 0.1)
    query_len = config.get('hyper_parameters.query_len', 79)
    pos_len = config.get('hyper_parameters.pos_len', 99)
    neg_len = config.get('hyper_parameters.neg_len', 90)

    print("***********************************")
    logger.info(
        "use_gpu: {}, train_data_dir: {}, epochs: {}, print_interval: {}, model_save_path: {}"
        .format(use_gpu, train_data_dir, epochs, print_interval,
                model_save_path))
    print("***********************************")

    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    simnet_model = create_model(config)
    model_init_path = config.get("dygraph.model_init_path", None)
    if model_init_path is not None:
        load_model(model_init_path, simnet_model)

    # to do : add optimizer function
    learning_rate = config.get("hyper_parameters.optimizer.learning_rate",
                               0.001)
    optimizer = paddle.optimizer.Adam(learning_rate=learning_rate,
                                      parameters=simnet_model.parameters())

    # to do init model
    file_list = [
        os.path.join(train_data_dir, x) for x in os.listdir(train_data_dir)
    ]
    print("read data")
    dataset = BQDataset(file_list)
    train_dataloader = create_data_loader(dataset, place=place, config=config)

    last_epoch_id = config.get("last_epoch", -1)

    for epoch_id in range(last_epoch_id + 1, epochs):
        # set train mode
        simnet_model.train()
        epoch_begin = time.time()
        interval_begin = time.time()
        train_reader_cost = 0.0
        train_run_cost = 0.0
        total_samples = 0
        reader_start = time.time()

        for batch_id, batch in enumerate(train_dataloader()):
            train_reader_cost += time.time() - reader_start
            optimizer.clear_grad()
            train_start = time.time()
            batch_size = batch_size

            inputs = create_feeds(batch, query_len, pos_len, neg_len)

            cos_pos, cos_neg = simnet_model(inputs, False)
            loss = create_loss(batch_size, margin, cos_pos, cos_neg)
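            # create_loss contrasts the positive and negative cosine similarities
            # under the configured margin (a margin-based ranking objective)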
            acc = get_acc(cos_neg, cos_pos, batch_size)

            loss.backward()
            optimizer.step()
            train_run_cost += time.time() - train_start
            total_samples += batch_size

            if batch_id % print_interval == 0:
                logger.info(
                    "epoch: {}, batch_id: {}, acc: {}, loss: {}, avg_reader_cost: {:.5f} sec, avg_batch_cost: {:.5f} sec, avg_samples: {:.5f}, ips: {:.5f} images/sec"
                    .format(
                        epoch_id, batch_id, acc.numpy(), loss.numpy(),
                        train_reader_cost / print_interval,
                        (train_reader_cost + train_run_cost) / print_interval,
                        total_samples / print_interval,
                        total_samples / (train_reader_cost + train_run_cost)))
                train_reader_cost = 0.0
                train_run_cost = 0.0
                total_samples = 0
            reader_start = time.time()

        logger.info(
            "epoch: {} done, acc: {}, loss: {}, epoch time: {:.2f} s".format(
                epoch_id, acc.numpy(), loss.numpy(),
                time.time() - epoch_begin))

        save_model(simnet_model,
                   optimizer,
                   model_save_path,
                   epoch_id,
                   prefix='rec')
Example #32
            vae.get_layer('encoder').predict(input_images)[0])
        plot_image(img_renorm(input_images), img_renorm(rec_images))


# selected_pm_layers = ['Conv2d_1a_3x3','Conv2d_3b_1x1', 'Conv2d_4b_3x3', 'add_5', 'add_15', 'add_21', 'Bottleneck']

# selected for calculating the perceptual loss.
selected_pm_layers = [
    'Conv2d_1a_3x3', 'Conv2d_2b_3x3', 'Conv2d_4a_3x3', 'Conv2d_4b_3x3',
    'Bottleneck'
]
vae_dfc = train(selected_pm_layers,
                alpha=1,
                latent_dim=1024,
                learning_rate=0.00033)
save_model(vae_dfc, 'face-vae' + str(time.time()))
#vae_dfc = load_model('face-vae-final')
test_vae(vae_dfc)
'''
vae_dfc = train(selected_pm_layers, alpha = 1, latent_dim = 1024)
save_model(vae_dfc, 'face-vae' + str(time.time()))
test_vae(vae_dfc)



vae_dfc = train(selected_pm_layers)
save_model(vae_dfc, 'face-vae' + str(time.time()))
test_vae(vae_dfc)
'''
Example #33
def main():
    parser = argparse.ArgumentParser(description="-----[CNN-classifier]-----")
    parser.add_argument(
        "--mode",
        default="train",
        help="train: train (with test) a model / test: test saved models")
    parser.add_argument(
        "--model",
        default="rand",
        help="available models: rand, static, non-static, multichannel")
    parser.add_argument(
        "--datafile",
        default="None",
        help=
        "data file base to read in a different dataset (needs training, valid, and test files)"
    )
    parser.add_argument("--dataset",
                        default="TREC",
                        help="available datasets: MR, TREC")
    parser.add_argument("--save_model",
                        default=False,
                        action='store_true',
                        help="whether saving model or not")
    parser.add_argument("--early_stopping",
                        default=False,
                        action='store_true',
                        help="whether to apply early stopping")
    parser.add_argument("--epoch",
                        default=100,
                        type=int,
                        help="number of max epoch")
    parser.add_argument("--learning_rate",
                        default=1.0,
                        type=float,
                        help="learning rate")
    parser.add_argument("--gpu",
                        default=-1,
                        type=int,
                        help="the number of gpu to be used")
    parser.add_argument("--cands",
                        default="None",
                        help="candidate outputs file")
    parser.add_argument("--outfile",
                        default="None",
                        help="output file to write complexity predictions to")

    options = parser.parse_args()
    if options.datafile == "None":
        data = getattr(utils, f"read_{options.dataset}")()
    else:
        data = utils.read_other(options.datafile)


#    data["vocab"] = sorted(list(set([w for sent in data["train_x"] + data["dev_x"] + data["test_x"] for w in sent])))
    data["vocab"] = sorted(
        list(set([w for sent in data["train_x"] for w in sent])))
    #    data["classes"] = sorted(list(set(data["train_y"])))
    data["word_to_idx"] = {w: i for i, w in enumerate(data["vocab"])}
    data["idx_to_word"] = {i: w for i, w in enumerate(data["vocab"])}

    params = {
        "MODEL":
        options.model,
        "DATASET":
        options.dataset,
        "DATAFILE":
        options.datafile,
        "SAVE_MODEL":
        options.save_model,
        "EARLY_STOPPING":
        options.early_stopping,
        "EPOCH":
        options.epoch,
        "LEARNING_RATE":
        options.learning_rate,
        "MAX_SENT_LEN":
        max([
            len(sent)
            for sent in data["train_x"] + data["dev_x"] + data["test_x"]
        ]),
        "BATCH_SIZE":
        50,
        "WORD_DIM":
        300,
        "VOCAB_SIZE":
        len(data["vocab"]),
        #        "CLASS_SIZE": len(data["classes"]),
        "FILTERS": [3, 4, 5],
        "FILTER_NUM": [100, 100, 100],
        "DROPOUT_PROB":
        0.5,
        "NORM_LIMIT":
        3,
        "GPU":
        options.gpu,
        "OUTFILE":
        options.outfile
    }

    print("=" * 20 + "INFORMATION" + "=" * 20)
    print("MODEL:", params["MODEL"])
    if options.datafile == "None":
        print("DATASET:", params["DATASET"])
    else:
        print("DATAFILE:", params["DATAFILE"])
    print("VOCAB_SIZE:", params["VOCAB_SIZE"])
    print("EPOCH:", params["EPOCH"])
    print("LEARNING_RATE:", params["LEARNING_RATE"])
    print("EARLY_STOPPING:", params["EARLY_STOPPING"])
    print("SAVE_MODEL:", params["SAVE_MODEL"])
    print("=" * 20 + "INFORMATION" + "=" * 20)

    if options.mode == "train":
        print("=" * 20 + "TRAINING STARTED" + "=" * 20)
        model = train(data, params)
        if params["SAVE_MODEL"]:
            utils.save_model(model, params)
        print("=" * 20 + "TRAINING FINISHED" + "=" * 20)
    else:
        candidates_file = options.cands
        model = utils.load_model(params).cuda(params["GPU"])
        preds = get_preds(candidates_file, data, model, params)

        print(preds[0])
        print(len(preds))
        print(len(preds[0]))

        with open(options.outfile, 'w', encoding='utf8') as f:
            for ps in preds:
                f.write("\t".join([str(p) for p in ps]) + "\n")
Example #34
def train_model(args):
    model = Model(node_embeddings, args.node_out_dim)

    if CUDA:
        model.cuda()

    if args.is_test:
        model.load_state_dict(
            torch.load('./checkpoints/{0}/trained_{1}.pth'.format(
                args.data, args.test_check)))
        get_test_score(model)
        return

    # NN = getL()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=1000,
                                                gamma=0.5,
                                                last_epoch=-1)

    # gat_loss_func = torch.nn.BCEWithLogitsLoss()
    gat_loss_func = torch.nn.MSELoss()

    epoch_losses = []  # losses of all epochs
    print("Number of epochs {}".format(args.epochs))

    model.train()

    for epoch in range(args.epochs + 1):
        # print("\nepoch-> ", epoch)
        # print("Training set shuffled, length is ", Corpus_.train_indices.shape)

        random.shuffle(Corpus_.train_edge_data)
        random.shuffle(Corpus_.train_neg_data)

        Corpus_.train_indices = np.array(list(Corpus_.train_edge_data)).astype(
            np.int32)
        Corpus_.train_neg_indices = np.array(list(
            Corpus_.train_neg_data)).astype(np.int32)

        start_time = time.time()
        epoch_loss = []

        if Corpus_.num_nodes % 500 == 0:
            num_iters_per_epoch = Corpus_.num_nodes // 500
        else:
            num_iters_per_epoch = (Corpus_.num_nodes // 500) + 1

        for iters in range(num_iters_per_epoch):
            start_time_iter = time.time()
            train_indices, train_indices_neg = Corpus_.get_iteration_batch(0)

            if CUDA:
                train_indices = Variable(
                    torch.LongTensor(train_indices)).cuda()
                train_indices_neg = Variable(
                    torch.LongTensor(train_indices_neg)).cuda()
            else:
                train_indices = Variable(torch.LongTensor(train_indices))

            optimizer.zero_grad()

            node_embeds = model()

            loss = batch_gat_loss(gat_loss_func, train_indices,
                                  train_indices_neg, node_embeds)

            if SP_LOSS == True:
                neighbor_spectrum_loss = get_neighbor_spectrum_loss(iters, Corpus_.neighbors, \
                       Corpus_.neighbors_count, node_embeds, num_iters_per_epoch)
                (loss +
                 float(args.regterm) * neighbor_spectrum_loss).backward()
            else:
                loss.backward()

            optimizer.step()

            epoch_loss.append(loss.data.item())

            end_time_iter = time.time()

            # print("Iteration-> {0}  , Iteration_time-> {1:.4f} , Iteration_loss {2:.4f}".format(
            # 	iters, end_time_iter - start_time_iter, loss.data.item()))

        scheduler.step()
        # if epoch % 100 == 0:
        print("Epoch {} , average loss {} , epoch_time {}\n".format(
            epoch,
            sum(epoch_loss) / len(epoch_loss),
            time.time() - start_time))
        epoch_losses.append(sum(epoch_loss) / len(epoch_loss))

        if epoch > 0 and epoch % 100 == 0:
            save_model(model, epoch, args.data)

    model.load_state_dict(
        torch.load('./checkpoints/{0}/trained_{1}.pth'.format(
            args.data, args.epochs)))
    get_test_score(model)
Example #35
adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

model.compile(optimizer=adam, loss='mse')

#validation data generation
valid_gen = utils.validation_generator(df)

for iteration in range(iterations):

    train_gen = utils.train_generator(df, batch_size)
    history = model.fit_generator(train_gen,
                                  samples_per_epoch=256 * 79,
                                  nb_epoch=1,
                                  validation_data=valid_gen,
                                  nb_val_samples=len(df))

    utils.save_model('model_' + str(iteration) + '.json',
                     'model_' + str(iteration) + '.h5', model)

    val_loss = history.history['val_loss'][0]
    if val_loss < val_best:
        best_model = iteration
        val_best = val_loss
        utils.save_model('model.json', 'model.h5', model)

    pr_threshold = 1 / (iteration + 1)

print('Best model found at iteration # ' + str(best_model))
print('Best Validation score : ' + str(np.round(val_best, 4)))

gc.collect()
def train(model, train_dataloader, val_dataloader, criterion, optimizer,
          scheduler, device, writer, args):
    total_data = len(train_dataloader.dataset)
    iterations_per_epoch = np.floor(total_data / args.batch_size)
    epochs = int(np.floor(args.iterations / iterations_per_epoch))
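    # the two lines above convert the requested iteration budget into a whole
    # number of epochs, given the dataset size and batch size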

    steps = 0
    stop = False
    best_loss = np.inf
    best_error = np.inf

    for epoch in tqdm(range(epochs), leave=False, desc='training'):
        model.train()

        for imgs, labels in train_dataloader:
            imgs, labels = imgs.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(imgs)
            train_loss = criterion(outputs, labels)

            train_loss.backward()
            optimizer.step()
            scheduler.step(steps)

            writer.add_scalar('train loss', train_loss.item(), steps)
            writer.add_scalar('lr', optimizer.param_groups[0]['lr'], steps)

            if hasattr(model, 'norm'):
                if steps % args.recode_norm == 0:
                    writer.add_scalar('norm', model.norm, steps)

            if hasattr(model, 'transform'):
                if steps % args.record_image == 0:
                    ori_imgs = imgs[:10]  # (10, 1, W, H)
                    aug_imgs = model.transform(ori_imgs)  # (10, 1, H, W)
                    all_imgs = torch.cat((ori_imgs, aug_imgs),
                                         dim=0)  # (20, 1, H, W)
                    all_imgs = make_grid(all_imgs, nrow=10)  # (3, H, W)

                    writer.add_image('Original / Transformed', all_imgs, steps)

            steps += 1

            if steps == args.iterations:
                stop = True
                break

        val_loss, val_error = evaluate(model, val_dataloader, criterion,
                                       device, writer, args, epoch)
        print('Epoch {:2d}: val loss = {:.4f}, val error = {:.4f}'.format(
            epoch, val_loss, val_error))

        if best_loss > val_loss or best_error > val_error:
            best_loss = val_loss if best_loss > val_loss else best_loss
            best_error = val_error if best_error > val_error else best_error

            save_model(model, args, MODELS_DIR)

        if stop:
            print('training done!')
            break

    model = load_model(model, args, MODELS_DIR)

    return model
Example #37
File: run-bpr.py  Project: Dellen/poi
def hook(model):
    eva.assess(model)
    save_model(model, "./model/model_%s_%i.pkl" % (mdname, model.current))
Example #38
def train(net, train_data_loader, test_data_loader, device, num_epochs=10):
    args = get_args()
    net.to(device)
    batch_size = train_data_loader.batch_size
    if args.target_mode == 'cont':
        criterion = nn.MSELoss()
    else:
        class_weights = _get_class_weights(train_data_loader.dataset.get_labels_hisotgram())
        class_weights = torch.from_numpy(class_weights).float().to(device)
        criterion = nn.CrossEntropyLoss(ignore_index=0, weight=class_weights)
        # criterion = nn.CrossEntropyLoss(ignore_index=0)

    optimizer = optim.Adam(net.parameters(), lr=LR0)
    optimizer.zero_grad()

    net.zero_grad()
    train_epochs_loss = []
    val_epochs_loss = []
    train_steps_per_e = len(train_data_loader.dataset) // batch_size
    val_steps_per_e   = len(test_data_loader.dataset) // batch_size
    best_loss = 1e5
    for e in range(num_epochs):
        print ("Epoch: ", e)
        net = net.train()
        val_loss_sum = 0.
        train_loss_sum = 0
        for i, data in enumerate(train_data_loader):
            x,_,y,_ = data
            x = x.to(device)
            y = y.to(device)
            out = net(x)
            optimizer.zero_grad()
            loss = criterion(out, y)
            loss.backward()
            optimizer.step()
            if i%10 == 0:
                print('Step: {:3} / {:3} Train loss: {:3.3}'.format(i, train_steps_per_e,loss.item()))
                # _show_examples(x, y, out, num_of_examples=1)
            train_loss_sum += loss.item()
        train_epochs_loss += [train_loss_sum / train_steps_per_e]
        conf_mat = torch.zeros((16, 16), dtype=torch.long)
        net = net.eval()
        for i, val_data in enumerate(test_data_loader):
            x,_,y,_ = val_data
            x, y = x.to(device), y.to(device)
            with torch.no_grad():
                out = net(x)
            loss = criterion(out, y)
            val_loss_sum += loss.item()
            conf_mat = _conf_matrix(out,y,conf_mat)
        val_epochs_loss += [val_loss_sum / val_steps_per_e]
        # _show_examples(x,y,out,num_of_examples=1)
        if val_epochs_loss[-1] < best_loss:
            print ("Saving Model")
            save_model(net, epoch=e, experiment_name=get_args().experiment_name)
            print (conf_mat)
            best_loss = val_epochs_loss[-1]
        acc, acc_top3 = _acc(conf_mat)
        print("\nepoch {:3} Train Loss: {:1.5} Val loss: {:1.5} Acc: {:1.5} Top3 Acc: {:1.5}".format(e, train_epochs_loss[-1],
                                                                                                     val_epochs_loss[-1], acc, acc_top3))
    return train_epochs_loss, val_epochs_loss
Example #39
    utils.load_model(
        args.name,
        args.modality2 + 'net_instance_' + str(args.instance) + '.pth.tar'),
    'stream2')
stream1Dict.update(stream2Dict)
state = model.state_dict()
state.update(stream1Dict)
model.load_state_dict(state)

model.to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=args.learningRate)

# Training and Inference
print('Training Two Stream (' + args.modality1 + '-' + args.modality2 +
      ') Network...')
bestAcc = 0
for epoch in tqdm(range(args.nEpochs)):
    engine.twoStreamTrain(model, trainLoader, optimizer, criterion, device,
                          plotter, epoch)
    accuracy, classWiseAcc = engine.twoStreamValidate(model, valLoader,
                                                      criterion, device,
                                                      plotter, epoch)
    if accuracy > bestAcc:
        utils.save_model({'stateDict': model.state_dict()}, args.name,
                         'twoStreamNet_' + args.modality1 + '-' +
                         args.modality2 + '_instance_' + str(args.instance) +
                         '.pth.tar')
        bestAcc = accuracy
print('Best Accuracy: ' + str(bestAcc))
# print('Classwise Accuracy: '+str(classWiseAcc))
Example #40
    def train(self):
        # load data sets
        train_sentences = load_sentences(self.train_file, self.lower,
                                         self.zeros)
        dev_sentences = load_sentences(self.dev_file, self.lower, self.zeros)
        test_sentences = load_sentences(self.test_file, self.lower, self.zeros)

        # Use selected tagging scheme (IOB / IOBES)
        update_tag_scheme(train_sentences, self.tag_schema)
        update_tag_scheme(test_sentences, self.tag_schema)

        # create maps if not exist
        if not os.path.isfile(self.map_file):
            # create dictionary for word
            if self.pre_emb:
                dico_chars_train = char_mapping(train_sentences, self.lower)[0]
                dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                    dico_chars_train.copy(), self.emb_file,
                    list(
                        itertools.chain.from_iterable([[w[0] for w in s]
                                                       for s in test_sentences
                                                       ])))
            else:
                _c, char_to_id, id_to_char = char_mapping(
                    train_sentences, self.lower)

            # Create a dictionary and a mapping for tags
            _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
            with open(self.map_file, "wb") as f:
                pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
        else:
            with open(self.map_file, "rb") as f:
                char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

        # prepare data, get a collection of list containing index
        train_data = prepare_dataset(train_sentences, char_to_id, tag_to_id,
                                     self.lower)
        dev_data = prepare_dataset(dev_sentences, char_to_id, tag_to_id,
                                   self.lower)
        test_data = prepare_dataset(test_sentences, char_to_id, tag_to_id,
                                    self.lower)
        print("%i / %i / %i sentences in train / dev / test." %
              (len(train_data), 0, len(test_data)))

        train_manager = BatchManager(train_data, self.batch_size)
        dev_manager = BatchManager(dev_data, 100)
        test_manager = BatchManager(test_data, 100)
        # make paths for storing logs and models if they do not exist
        # make_path(FLAGS)
        if os.path.isfile(self.config_file):
            config = load_config(self.config_file)
        else:
            config = self.config_model(char_to_id, tag_to_id)
            save_config(config, self.config_file)

        log_path = os.path.join("log", self.log_file)
        logger = get_logger(log_path)
        print_config(config, logger)

        # limit GPU memory
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        steps_per_epoch = train_manager.len_data
        with tf.Session(config=tf_config) as sess:
            model = create_model(sess, Model, self.ckpt_path, load_word2vec,
                                 config, id_to_char, logger)
            logger.info("start training")
            loss = []
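            # fixed budget of 100 passes over the training data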
            for i in range(100):
                for batch in train_manager.iter_batch(shuffle=True):
                    step, batch_loss = model.run_step(sess, True, batch)
                    loss.append(batch_loss)
                    if step % self.steps_check == 0:
                        iteration = step // steps_per_epoch + 1
                        logger.info("iteration:{} step:{}/{}, "
                                    "NER loss:{:>9.6f}".format(
                                        iteration, step % steps_per_epoch,
                                        steps_per_epoch, np.mean(loss)))
                        loss = []

                best = self.evaluate(sess, model, "dev", dev_manager,
                                     id_to_tag, logger)
                if best:
                    save_model(sess, model, self.ckpt_path, logger)
                self.evaluate(sess, model, "test", test_manager, id_to_tag,
                              logger)
Example #41
def train(train_index, test_index, locations, data, wsize, nb_epoch=50, batch_size=50, neurons_full_layer=100):
    n,m = data.shape
    
    print len(train_index),len(test_index)
    
    n_training = len(train_index)
    n_testing = len(test_index)

    nwsize = wsize*2+1

    model = make_model_2(m, nwsize[0], nwsize[1],nwsize[2],neurons_full_layer)

    X_training = None
    y_training = None
    X_testing = None
    y_testing = None

    if os.path.exists("X_training.npy"):
        X_training = np.load("X_training.npy")
    if os.path.exists("y_training.npy"):
        y_training = np.load("y_training.npy")

    if os.path.exists("X_testing.npy"):
        X_testing = np.load("X_testing.npy")
    if os.path.exists("y_testing.npy"):
        y_testing = np.load("y_testing.npy")

    if X_training is None:
        X_training = np.empty((n_training,m,nwsize[0],nwsize[1],nwsize[2]))
        y_training = np.empty((n_training,m))
        X_testing = np.empty((n_testing,m,nwsize[0],nwsize[1],nwsize[2]))
        y_testing = np.empty((n_testing,m))
        
        kdtree = cKDTree(locations)
        k = 1000
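        # build, for every sample, a 3-D neighbourhood "image" from its k nearest neighbours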
        
        print "processing training data"
        for i,index in enumerate(train_index):
            location = locations[index]
            image = preprocess.create_image_from_neighbours_3d(location,locations,data,k,kdtree,(wsize[0],wsize[1],wsize[2]),(10.0,10.0,10.0),distance=np.inf)
            X_training[i,:,:,:,:] = image
            y_training[i,:] = data[index,:]
        
        print "processing testing data"
        for i,index in enumerate(test_index):
            location = locations[index]
            image = preprocess.create_image_from_neighbours_3d(location,locations,data,k,kdtree,(wsize[0],wsize[1],wsize[2]),(10.0,10.0,10.0),distance=np.inf)
            X_testing[i,:,:,:,:] = image
            y_testing[i,:] = data[index,:]
            
        np.save("X_training",X_training)
        np.save("y_training",y_training)
        np.save("X_testing",X_testing)
        np.save("y_testing",y_testing)

    history = model.fit(X_training, y_training,
        batch_size=batch_size, nb_epoch=nb_epoch,
        verbose=1, validation_data=(X_testing, y_testing))
        
    utils.save_model(model,"muestras-model-cnn")        
                        
    score = model.predict(X_testing)
    
    print np.mean(y_testing[:,0])
    print np.mean(score[:,0])
    print np.std(y_testing[:,0])
    print np.std(score[:,0])

    print "r2", sklearn.metrics.r2_score(score, y_testing)
Example #42
    # Setup hidden layers
    model.add(
        Dense(6, activation='relu', kernel_initializer='uniform',
              input_dim=11))
    model.add(Dense(6, activation='relu', kernel_initializer='uniform'))
    model.add(Dense(1, activation='sigmoid', kernel_initializer='uniform'))

    # Compile the model with given optimizer and loss function
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    return model


if __name__ == "__main__":
    scaler = StandardScaler()
    X, y = read_data()
    X_train, X_test, y_train, y_test = split_and_normalize(X, y, scaler)

    classifier = build_classifier()
    history = classifier.fit(x=X_train, y=y_train, batch_size=10, epochs=100)
    save_model(classifier, scaler, "models", prefix="v1")
    plot_history(history)

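    # threshold the sigmoid outputs at 0.5 to obtain hard class labels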
    y_pred_train = classifier.predict(X_train) > 0.5
    y_pred_test = classifier.predict(X_test) > 0.5

    acc_train = accuracy(y_train, y_pred_train) * 100
    acc_test = accuracy(y_test, y_pred_test) * 100

    print("Accuracy (train): {}%".format(str(acc_train)))
    print("Accuracy (test) : {}%".format(str(acc_test)))
    print("Dividing data...")
    data = prepare_data.PrepareData(maximum_path_length, test_share)
    train_data, train_labels, test_data, test_labels, data_dictionary, reverse_dictionary, class_weights, train_sample_weights, usage_pred = data.get_data_labels_matrices(
        workflow_paths, frequency_paths, tool_usage_path, cutoff_date)

    # find the best model and start training
    predict_tool = PredictTool()

    # start training with weighted classes
    print("Training with weighted classes and samples ...")
    results_weighted = predict_tool.find_train_best_network(
        config, optimise_parameters_node, reverse_dictionary, train_data,
        train_labels, test_data, test_labels, val_share, n_epochs,
        class_weights, usage_pred, train_sample_weights, compatible_next_tools,
        hyperparameter_optimize)
    utils.save_model(results_weighted, data_dictionary, compatible_next_tools,
                     trained_model_path)

    # print loss and precision
    print()
    print("Training loss")
    print(results_weighted["train_loss"])
    print()
    print("Validation loss")
    print(results_weighted["validation_loss"])
    print()
    print("Test absolute precision")
    print(results_weighted["test_absolute_precision"])
    print()
    print("Test compatible precision")
    print(results_weighted["test_compatible_precision"])
    print()
Example #44
def test_mlp(learning_rate=0.01, L2_reg=0.00000001, n_epochs=200,
             dataset='theano.classifier.data', batch_size=10000, max_iter=5000, output='theano.model.out', validation_freq=100):
    """
    Demonstrate stochastic gradient descent optimization for a multilayer
    perceptron

    This is demonstrated on MNIST.

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
    gradient)

    :type L2_reg: float
    :param L2_reg: L2-norm's weight when added to the cost (see
    regularization)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: the path to the theano.classifier.data


   """
    print (" Learning with params : ")
    print (" Learning rate : " + str(learn_rate)); 
    print (" Regularlization params : " + str(L2_reg))
    print (" Batch size : "  + str(batch_size))
    print (" Max Iter : " + str(max_iter))
    print (" Epochs : " + str(n_epochs))
    print (" Evaluation frequency  : " + str(validation_freq))
    
    print ('... loading data ')
    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    E = datasets[2]
    W1 = datasets[3]
    B1 = datasets[4]
    W2 = datasets[5]

    # compute number of minibatches for training, validation and testing
    
    n_train_batches = train_set_x.owner.inputs[0].get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.owner.inputs[0].get_value(borrow=True).shape[0] / batch_size
    
    print 'Training batches : ' + str(n_train_batches) 
    print 'Valid batches : ' + str(n_valid_batches)
    
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    x = T.imatrix('x')  # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels
    rng = numpy.random.RandomState(1234)
    
    # construct the MLP class
    classifier = MLP(rng,
        input=x,
        E=E,
        W1=W1,
        B1=B1,
        W2 = W2, 
    )

    train_errors = (classifier.errors(y))    
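    # cost = negative log-likelihood plus an L2 weight penalty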
    cost = (
        classifier.negative_log_likelihood(y)
        + L2_reg * classifier.L2_sqr
    )
    

    # end-snippet-4

    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch
    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )

    # compute the gradient of cost with respect to theta 
    gparams = [T.grad(cost, param) for param in classifier.params]

    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs

    updates = [
        (param, param - learning_rate * gparam)
        for param, gparam in zip(classifier.params, gparams)
    ]

    # compiling a Theano function `train_model` that returns the cost, but
    # in the same time updates the parameter of the model based on the rules
    # defined in `updates`
    train_model = theano.function(
        inputs=[index],
        outputs=(cost, train_errors),
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],   # x,y here is symbolic variable 
            y: train_set_y[index * batch_size: (index + 1) * batch_size],
        }
    )
    # end-snippet-5

    ###############
    # TRAIN MODEL #
    ###############
    print '... training (using drop-out = 0.5)'

    # early-stopping parameters
    patience = 100000  # Long Duong: require at least this many examples before stopping
    patience_increase = 2  # wait this much longer when a new best is found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    #validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatche before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch
    validation_frequency = validation_freq
    
    ######## FOR TESTING ONLY ##################
    #validation_frequency = 5 
    #n_train_batches = 10 
    #n_epochs = 1 
    ######################################
    
    
    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            
            (minibatch_avg_cost, minibatch_avg_error) = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index
            print (' Iteration :  ' + str(iter) + ' with Cost = ' + str(minibatch_avg_cost) + '  with errors = ' + str(minibatch_avg_error))
            # Long Duong: each epoch covers n_train_batches minibatches;
            # iter counts parameter updates (i.e. minibatches processed so far)

            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print(
                    'epoch %i, minibatch %i/%i, validation error %f %%' %
                    (
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        this_validation_loss * 100.
                    )
                )

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    #improve patience if loss improvement is good enough
                    if (this_validation_loss < best_validation_loss * improvement_threshold):
                        # Long Duong: it took `iter` updates to reach this result,
                        # so wait proportionally longer before expecting a better one
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter
                    # Save the model (especially for dropout model) 
                    save_model(output,classifier.embeddingLayer.E.get_value(), 
                                       classifier.hiddenLayer.W.get_value().T, 
                                       classifier.hiddenLayer.b.get_value(), 
                                       classifier.logRegressionLayer.W.get_value().T)

            # Long Duong : add max_iter criterion 
            if (patience <= iter) or (iter > max_iter) :
                done_looping = True
                break
            
    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i') %
          (best_validation_loss * 100., best_iter + 1))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
Example #45
def main(args):
    if not check_exists(args.save_dir):
        os.makedirs(args.save_dir)

    dataset = IQiYiFineTuneSceneDataset(args.data_root,
                                        'train+val-noise',
                                        image_root='/home/dcq/img')

    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=4)

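    # log roughly ten times per epoch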
    log_step = len(data_loader) // 10 if len(data_loader) > 10 else 1

    model = ArcFaceSEResNeXtModel(args.num_classes, include_top=True)
    metric_func = ArcMarginProduct()
    loss_func = FocalLoss(gamma=2.)

    trainable_params = [
        {
            'params': model.base_model.parameters(),
            "lr": args.learning_rate / 100
        },
        {
            'params': model.weight
        },
    ]

    optimizer = optim.SGD(trainable_params,
                          lr=args.learning_rate,
                          momentum=0.9,
                          weight_decay=1e-5)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.epoch)

    device, device_ids = prepare_device()
    model = model.to(device)
    if len(device_ids) > 1:
        model = torch.nn.DataParallel(model, device_ids=device_ids)

    for epoch_idx in range(args.epoch):
        total_loss = .0
        for batch_idx, (images, labels, _) in enumerate(data_loader):
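            # each sample contributes a group of 3 frames; fold them into the batch axis (predictions are averaged back below)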
            images = images.view(-1, *images.size()[-3:])
            images = images.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            outputs = model(images)
            outputs = outputs.view(outputs.size(0) // 3, 3, -1)
            outputs = torch.mean(outputs, dim=1)
            outputs_metric = metric_func(outputs, labels)
            local_loss = loss_func(outputs_metric, labels)

            local_loss.backward()
            optimizer.step()

            total_loss += local_loss.item()

            if batch_idx % log_step == 0 and batch_idx != 0:
                print('Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}'.format(
                    epoch_idx, batch_idx * args.batch_size, len(dataset),
                    100.0 * batch_idx / len(data_loader), local_loss.item()))

        log = {
            'epoch': epoch_idx,
            'lr': optimizer.param_groups[0]['lr'],
            'loss': total_loss / len(data_loader)
        }

        for key, value in sorted(log.items(), key=lambda item: item[0]):
            print('    {:20s}: {:6f}'.format(str(key), value))

        lr_scheduler.step()

    save_model(model.module, args.save_dir, 'demo_arcface_fine_tune_model',
               args.epoch)
Example #46
def train_model(model, data_loader, params):
    # data loader
    train_loader, val_loader, test_loader = data_loader['train'], data_loader[
        'devel'], data_loader['test']
    # criterion
    if params.loss == 'ccc':
        criterion = utils.CCCLoss()
    elif params.loss == 'mse':
        criterion = utils.MSELoss()
    elif params.loss == 'l1':
        criterion = utils.L1Loss()
    elif params.loss == 'tilted':
        criterion = utils.TiltedLoss()
    elif params.loss == 'tiltedCCC':
        criterion = utils.TiltedCCCLoss()
    elif params.loss == 'cwCCC':
        criterion = utils.CCCLossWithStd()
    else:
        raise Exception(f'Not supported loss "{params.loss}".')
    # optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=params.lr,
                           weight_decay=params.l2_penalty)
    # lr scheduler
    if params.lr_scheduler == 'step':
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                 step_size=params.lr_patience,
                                                 gamma=params.lr_factor)
    else:
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer=optimizer,
            mode='min',
            patience=params.lr_patience,
            factor=params.lr_factor,
            min_lr=params.min_lr,
            verbose=True if params.log_extensive else False)
    # train
    best_val_loss = float('inf')
    best_val_ccc, best_val_pcc, best_val_rmse = [], [], []
    best_mean_val_ccc = -1
    best_model_file = ''
    early_stop = 0
    for epoch in range(1, params.epochs + 1):

        ################################
        if params.uncertainty_approach == 'cw_ccc':
            train_loss = train_with_std(model, train_loader, criterion,
                                        optimizer, epoch, params)

        else:
            train_loss = train(model, train_loader, criterion, optimizer,
                               epoch, params)
        ################################
        if params.uncertainty_approach == "quantile_regression":
            val_loss, val_ccc, val_pcc, val_rmse = validate_quantile_regression(
                model, val_loader, criterion, params)

        elif params.uncertainty_approach == 'cw_ccc':
            val_loss, val_ccc, val_pcc, val_rmse = validate_std(
                model, val_loader, criterion, params)

        else:
            val_loss, val_ccc, val_pcc, val_rmse = validate(
                model, val_loader, criterion, params)
        ################################

        mean_val_ccc, mean_val_pcc, mean_val_rmse = np.mean(val_ccc), np.mean(
            val_pcc), np.mean(val_rmse)
        if params.log_extensive:
            print('-' * 50)
            print(f'Epoch:{epoch:>3} | [Train] | Loss: {train_loss:>.4f}')
            print(
                f'Epoch:{epoch:>3} |   [Val] | Loss: {val_loss:>.4f} | '
                f'[CCC]: {mean_val_ccc:>7.4f} {[format(x, "7.4f") for x in val_ccc]} | '
                f'PCC: {mean_val_pcc:>.4f} {[format(x, ".4f") for x in val_pcc]} | '
                f'RMSE: {mean_val_rmse:>.4f} {[format(x, ".4f") for x in val_rmse]}'
            )

        if mean_val_ccc > best_mean_val_ccc:
            best_val_ccc = val_ccc
            best_mean_val_ccc = np.mean(best_val_ccc)

            best_model_file = utils.save_model(model, params)
            if params.log_extensive:
                print(
                    f'Epoch:{epoch:>3} | Save best model "{best_model_file}"!')
            best_val_loss, best_val_pcc, best_val_rmse = val_loss, val_pcc, val_rmse  # loss, PCC and RMSE at the epoch with the best val CCC
            early_stop = 0
        else:
            early_stop += 1
            if early_stop >= params.early_stop:
                print(
                    f'Note: the validation target has not improved for {params.early_stop}'
                    f' consecutive epochs; stopping training early!')
                break

        if params.lr_scheduler == 'step':
            lr_scheduler.step()
        else:
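            # ReduceLROnPlateau minimises its metric, so step on 1 - mean CCC (higher CCC gives a lower value)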
            lr_scheduler.step(1 - np.mean(val_ccc))

    best_mean_val_pcc, best_mean_val_rmse = np.mean(best_val_pcc), np.mean(
        best_val_rmse)

    print(
        f'Seed {params.current_seed} | '
        f'Best [Val CCC]:{best_mean_val_ccc:>7.4f} {[format(x, "7.4f") for x in best_val_ccc]}| '
        f'Loss: {best_val_loss:>.4f} | '
        f'PCC: {best_mean_val_pcc:>.4f} {[format(x, ".4f") for x in best_val_pcc]} | '
        f'RMSE: {best_mean_val_rmse:>.4f} {[format(x, ".4f") for x in best_val_rmse]}'
    )

    return best_val_loss, best_val_ccc, best_val_pcc, best_val_rmse, best_model_file
Example #47
def main():
    train_df = utils.get_train_df()

    model = randomforest(train_df)
    utils.save_model(model)